| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
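Each row below carries the six scalar columns followed by the file contents split across the `prefix`, `middle`, and `suffix` cells, which suggests a fill-in-the-middle style code dataset. As a minimal sketch (the `reassemble` helper and the literal field values are illustrative assumptions, not part of this dump), a row's original file text can be recovered by concatenating the three segments:

```python
# Minimal sketch: rebuilding one file from a prefix/middle/suffix row.
# Field names follow the header above; the string values are shortened
# placeholders for illustration only.
row = {
    "repo_name": "thgcode/soundrts",
    "path": "soundrts/res.py",
    "prefix": "p = o",                      # text before the masked span
    "middle": "s.path.join(\"multi\", n)",  # the masked span itself
    "suffix": "\nw.append(Map(p, digest, official=True))",  # text after it
}

def reassemble(row: dict) -> str:
    """Concatenate the three segments back into the original source text."""
    return row["prefix"] + row["middle"] + row["suffix"]

print(reassemble(row))
```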
| thgcode/soundrts | soundrts/res.py | Python | bsd-3-clause | 4,481 | 0.001562 |
"""SoundRTS resource manager"""
import os
from lib.resource import ResourceLoader
import config
import options
from paths import MAPS_PATHS
def get_all_packages_paths():
"""return the default "maps and mods" paths followed by the paths of the active packages"""
return MAPS_PATHS # + package_manager.get_packages_paths()
if options.mods is not None:
mods = options.mods
else:
mods = config.mods
_r = ResourceLoader(mods, config.soundpacks, get_all_packages_paths())
mods = _r.mods
soundpacks = _r.soundpacks
get_text_file = _r.get_text_file
load_texts = _r.load_texts
load_sounds = _r.load_sounds
def on_loading():
from lib.voice import voice
voice.item([4322, mods, "."]) # "loading"
def on_complete():
from lib.voice import voice
for mod in _r.unavailable_mods:
voice.alert([1029, 4330, mod])
def reload_all():
global mods, soundpacks
from clientmedia import sounds, update_display_caption
_r.update_mods_list(mods, soundpacks, get_all_packages_paths())
mods = _r.mods
soundpacks = _r.soundpacks
update_display_caption()
sounds.load_default(_r, on_loading, on_complete)
def set_mods(new_mods):
global mods
if new_mods != mods:
mods = new_mods
reload_all()
def set_soundpacks(new_soundpacks):
global soundpacks
if new_soundpacks != soundpacks:
soundpacks = new_soundpacks
reload_all()
# campaigns
def _get_campaigns():
from campaign import Campaign
w = []
for mp in get_all_packages_paths():
d = os.path.join(mp, "single")
if os.path.isdir(d):
for n in os.listdir(d):
p = os.path.join(d, n)
if os.path.isdir(p):
if n == "campaign":
w.append(Campaign(p, [4267]))
else:
w.append(Campaign(p))
return w
_campaigns = None
_mods_at_the_previous_campaigns_update = None
def campaigns():
global _campaigns, _mods_at_the_previous_campaigns_update
if _campaigns is None or _mods_at_the_previous_campaigns_update != mods:
_campaigns = _get_campaigns()
_mods_at_the_previous_campaigns_update = mods
return _campaigns
# multiplayer maps
def _add_official_multi(w):
from mapfile import Map
maps = [line.strip().split() for line in open("cfg/official_maps.txt")]
for n, digest in maps:
p = os.path.join("multi", n)
w.append(Map(p, digest, official=True))
def _add_custom_multi(w):
from mapfile import Map
for mp in get_all_packages_paths():
d = os.path.join(mp, "multi")
if os.path.isdir(d):
for n in os.listdir(d):
p = os.path.join(d, n)
if os.path.normpath(p) not in (os.path.normpath(x.path) for x in w):
w.append(Map(p, None))
def _move_recommended_maps(w):
from definitions import Style
style = Style()
style.load(get_text_file("ui/style", append=True, localize=True))
for n in reversed(style.get("parameters", "recommended_maps")):
for m in reversed(w[:]): # reversed so the custom map is after the official map
if m.get_name()[:-4] == n:
w.remove(m)
w.insert(0, m)
def _get_worlds_multi():
w = []
_add_official_multi(w)
_add_custom_multi(w)
_move_recommended_maps(w)
return w
_multi_maps = None
_mods_at_the_previous_multi_maps_update = None
def worlds_multi():
global _multi_maps, _mods_at_the_previous_multi_maps_update
if _multi_maps is None or _mods_at_the_previous_multi_maps_update != mods:
_multi_maps = _get_worlds_multi()
_mods_at_the_previous_multi_maps_update = mods
return _multi_maps
# mods
def is_a_soundpack(path):
for name in ("rules.txt", "ai.txt"):
if os.path.isfile(os.path.join(path, name)):
return False
return True
def is_a_mod(path):
return not is_a_soundpack(path)
def available_mods(check_mod_type=is_a_mod):
result = []
for path in get_all_packages_paths():
mods_path = os.path.join(path, "mods")
for mod in os.listdir(mods_path):
path = os.path.join(mods_path, mod)
if os.path.isdir(path) \
and check_mod_type(path) \
and mod not in result:
result.append(mod)
return result
def available_soundpacks():
return available_mods(is_a_soundpack)
| golismero/golismero | tools/sqlmap/waf/knownsec.py | Python | gpl-2.0 | 511 | 0.003914 |
#!/usr/bin/env python
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import re
from lib.core.settings import WAF_ATTACK_VECTORS
__product__ = "KS-WAF (Knownsec)"
def detect(get_page):
retval = False
for vector in WAF_ATTACK_VECTORS:
page, headers, code = get_page(get=vector)
retval = re.search(r"url\('/ks-waf-error\.png'\)", page, re.I) is not None
if retval:
break
return retval
| carefree0910/MachineLearning | f_NN/Optimizers.py | Python | mit | 3,492 | 0.002864 |
import os
import sys
root_path = os.path.abspath("../")
if root_path not in sys.path:
sys.path.append(root_path)
import numpy as np
from Util.Metas import TimingMeta
class Optimizer:
def __init__(self, lr=0.01, cache=None):
self.lr = lr
self._cache = cache
def __str__(self):
return self.__class__.__name__
def __repr__(self):
return str(self)
def feed_variables(self, variables):
self._cache = [
np.zeros(var.shape) for var in variables
]
def run(self, i, dw):
pass
def update(self):
pass
class MBGD(Optimizer, metaclass=TimingMeta):
def run(self, i, dw):
return self.lr * dw
class Momentum(Optimizer, metaclass=TimingMeta):
def __init__(self, lr=0.01, cache=None, epoch=100, floor=0.5, ceiling=0.999):
Optimizer.__init__(self, lr, cache)
self._momentum = floor
self._step = (ceiling - floor) / epoch
self._floor, self._ceiling = floor, ceiling
self._is_nesterov = False
def run(self, i, dw):
dw *= self.lr
velocity = self._cache
velocity[i] *= self._momentum
velocity[i] += dw
if not self._is_nesterov:
return velocity[i]
return self._momentum * velocity[i] + dw
def update(self):
if self._momentum < self._ceiling:
self._momentum += self._step
class NAG(Momentum, metaclass=TimingMeta):
def __init__(self, lr=0.01, cache=None, epoch=100, floor=0.5, ceiling=0.999):
Momentum.__init__(self, lr, cache, epoch, floor, ceiling)
self._is_nesterov = True
class RMSProp(Optimizer, metaclass=TimingMeta):
def __init__(self, lr=0.01, cache=None, decay_rate=0.9, eps=1e-8):
Optimizer.__init__(self, lr, cache)
self.decay_rate, self.eps = decay_rate, eps
def run(self, i, dw):
self._cache[i] = self._cache[i] * self.decay_rate + (1 - self.decay_rate) * dw ** 2
return self.lr * dw / (np.sqrt(self._cache[i] + self.eps))
class Adam(Optimizer, metaclass=TimingMeta):
def __init__(self, lr=0.01, cache=None, beta1=0.9, beta2=0.999, eps=1e-8):
Optimizer.__init__(self, lr, cache)
self.beta1, self.beta2, self.eps = beta1, beta2, eps
def feed_variables(self, variables):
self._cache = [
[np.zeros(var.shape) for var in variables],
[np.zeros(var.shape) for var in variables],
]
def run(self, i, dw):
self._cache[0][i] = self._cache[0][i] * self.beta1 + (1 - self.beta1) * dw
self._cache[1][i] = self._cache[1][i] * self.beta2 + (1 - self.beta2) * (dw ** 2)
return self.lr * self._cache[0][i] / (np.sqrt(self._cache[1][i] + self.eps))
# Factory
class OptFactory:
available_optimizers = {
"MBGD": MBGD, "Momentum": Momentum, "NAG": NAG, "RMSProp": RMSProp, "Adam": Adam,
}
def get_optimizer_by_name(self, name, variables, lr, epoch=100):
try:
optimizer = self.available_optimizers[name](lr)
if variables is not None:
optimizer.feed_variables(variables)
if epoch is not None and isinstance(optimizer, Momentum):
optimizer.epoch = epoch
return optimizer
except KeyError:
raise NotImplementedError("Undefined Optimizer '{}' found".format(name))
| frappe/erpnext | erpnext/regional/doctype/e_invoice_request_log/test_e_invoice_request_log.py | Python | gpl-3.0 | 241 | 0.004149 |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestEInvoiceRequestLog(unittest.TestCase):
pass
| GoogleCloudPlatform/cloud-data-quality | clouddq/log.py | Python | apache-2.0 | 3,052 | 0 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from logging import Logger
import dataclasses
import json
import logging
import sys
from google.cloud.logging.handlers import CloudLoggingHandler
import google.cloud.logging # Don't conflict with standard logging
APP_VERSION = "0.5.2"
APP_NAME = "clouddq"
LOG_LEVEL = logging._nameToLevel["DEBUG"]
class JsonEncoderStrFallback(json.JSONEncoder):
def default(self, obj):
try:
if dataclasses.is_dataclass(obj):
return dataclasses.asdict(obj)
else:
return super().default(obj)
except TypeError as exc:
if "not JSON serializable" in str(exc):
return str(obj)
raise
class JsonEncoderDatetime(JsonEncoderStrFallback):
def default(self, obj):
if isinstance(obj, datetime):
return obj.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
else:
return super().default(obj)
class JSONFormatter(logging.Formatter):
def __init__(self):
super().__init__()
def format(self, record):
try:
message = json.loads(record.getMessage())
except ValueError:
message = record.getMessage()
record.msg = json.dumps(message, cls=JsonEncoderDatetime)
return super().format(record)
def add_cloud_logging_handler(logger: Logger):
client = google.cloud.logging.Client()
handler = CloudLoggingHandler(
client=client,
name="clouddq",
labels={
"name": APP_NAME,
"releaseId": APP_VERSION,
},
)
handler.setFormatter(JSONFormatter())
logger.addHandler(handler)
def get_json_logger():
json_logger = logging.getLogger("clouddq-json-logger")
if not len(json_logger.handlers):
json_logger.setLevel(LOG_LEVEL)
logging_stream_handler = logging.StreamHandler(sys.stdout)
logging_stream_handler.setFormatter(JSONFormatter())
json_logger.addHandler(logging_stream_handler)
return json_logger
def get_logger():
logger = logging.getLogger("clouddq")
if not len(logger.handlers):
logger.setLevel(LOG_LEVEL)
logging_stream_handler = logging.StreamHandler(sys.stderr)
stream_formatter = logging.Formatter(
"{asctime} {name} {levelname:8s} {message}", style="{"
)
logging_stream_handler.setFormatter(stream_formatter)
logger.addHandler(logging_stream_handler)
return logger
| cxxgtxy/tensorflow | tensorflow/python/ops/linalg/linear_operator_kronecker.py | Python | apache-2.0 | 23,700 | 0.005148 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Construct the Kronecker product of one or more `LinearOperators`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorKronecker"]
def _vec(x):
"""Stacks column of matrix to form a single column."""
return array_ops.reshape(
array_ops.matrix_transpose(x),
array_ops.concat(
[array_ops.shape(x)[:-2], [-1]], axis=0))
def _unvec_by(y, num_col):
"""Unstack vector to form a matrix, with a specified amount of columns."""
return array_ops.matrix_transpose(
array_ops.reshape(
y,
array_ops.concat(
[array_ops.shape(y)[:-1], [num_col, -1]], axis=0)))
def _rotate_last_dim(x, rotate_right=False):
"""Rotate the last dimension either left or right."""
ndims = array_ops.rank(x)
if rotate_right:
transpose_perm = array_ops.concat(
[[ndims - 1], math_ops.range(0, ndims - 1)], axis=0)
else:
transpose_perm = array_ops.concat(
[math_ops.range(1, ndims), [0]], axis=0)
return array_ops.transpose(x, transpose_perm)
@tf_export("linalg.LinearOperatorKronecker")
class LinearOperatorKronecker(linear_operator.LinearOperator):
"""Kronecker product between two `LinearOperators`.
This operator composes one or more linear operators `[op1,...,opJ]`,
building a new `LinearOperator` representing the Kronecker product:
`op1 x op2 x .. opJ` (we omit parentheses as the Kronecker product is
associative).
If `opj` has shape `batch_shape_j + [M_j, N_j]`, then the composed operator
will have shape equal to `broadcast_batch_shape + [prod M_j, prod N_j]`,
where the product is over all operators.
```python
# Create a 4 x 4 linear operator composed of two 2 x 2 operators.
operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
operator_2 = LinearOperatorFullMatrix([[1., 0.], [2., 1.]])
operator = LinearOperatorKronecker([operator_1, operator_2])
operator.to_dense()
==> [[1., 0., 2., 0.],
[2., 1., 4., 2.],
[3., 0., 4., 0.],
[6., 3., 8., 4.]]
operator.shape
==> [4, 4]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [4, 2] Tensor
operator.matmul(x)
==> Shape [4, 2] Tensor
# Create a [2, 3] batch of 4 x 5 linear operators.
matrix_45 = tf.random.normal(shape=[2, 3, 4, 5])
operator_45 = LinearOperatorFullMatrix(matrix_45)
# Create a [2, 3] batch of 5 x 6 linear operators.
matrix_56 = tf.random.normal(shape=[2, 3, 5, 6])
operator_56 = LinearOperatorFullMatrix(matrix_56)
# Compose to create a [2, 3] batch of 20 x 30 operators.
operator_large = LinearOperatorKronecker([operator_45, operator_56])
# Create a shape [2, 3, 30, 2] vector.
x = tf.random.normal(shape=[2, 3, 30, 2])
operator_large.matmul(x)
==> Shape [2, 3, 20, 2] Tensor
```
#### Performance
The performance of `LinearOperatorKronecker` on any operation is equal to
the sum of the individual operators' operations.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
operators,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name=None):
r"""Initialize a `LinearOperatorKronecker`.
`LinearOperatorKronecker` is initialized with a list of operators
`[op_1,...,op_J]`.
Args:
operators: Iterable of `LinearOperator` objects, each with
the same `dtype` and composable shape, representing the Kronecker
factors.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix\
#Extension_for_non_symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`. Default is the individual
operators names joined with `_x_`.
Raises:
TypeError: If all operators do not have the same `dtype`.
ValueError: If `operators` is empty.
"""
parameters = dict(
operators=operators,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name
)
# Validate operators.
check_ops.assert_proper_iterable(operators)
operators = list(operators)
if not operators:
raise ValueError(
"Expected a list of >=1 operators. Found: %s" % operators)
self._operators = operators
# Validate dtype.
dtype = operators[0].dtype
for operator in operators:
if operator.dtype != dtype:
name_type = (str((o.name, o.dtype)) for o in operators)
raise TypeError(
"Expected all operators to have the same dtype. Found %s"
% " ".join(name_type))
# Auto-set and check hints.
# A Kronecker product is invertible, if and only if all factors are
# invertible.
if all(operator.is_non_singular for operator in operators):
if is_non_singular is False:
raise ValueError(
"The Kronecker product of non-singular operators is always "
"non-singular.")
is_non_singular = True
if all(operator.is_self_adjoint for operator in operators):
if is_self_adjoint is False:
raise ValueError(
"The Kronecker product of self-adjoint operators is always "
"self-adjoint.")
is_self_adjoint = True
# The eigenvalues of a Kronecker product are equal to the products of eigen
# values of the corresponding factors.
if all(operator.is_positive_definite for operator in operators):
if is_positive_definite is False:
raise ValueError("The Kronecker product of positive-definite operators "
"is always positive-definite.")
is_positive_definite = True
# Initialization.
graph_parents = [
| Alecardv/College-projects | 2048/Control.py | Python | gpl-3.0 | 1,336 | 0.026946 |
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 08 13:25:40 2015
@author: J. Alejandro Cardona
"""
from Board import *
import pygame
UP, LEFT, DOWN, RIGHT = 1, 2, 3, 4
juego = Board()
_2 = pygame.image.load("2.jpg"); _2re = _2.get_rect()
_4 = pygame.image.load("4.jpg"); _4re = _4.get_rect()
_8 = pygame.image.load("8.jpg"); _8re = _8.get_rect()
_16 = pygame.image.load("16.jpg"); _16re = _16.get_rect()
_32 = pygame.image.load("32.jpg"); _32re = _32.get_rect()
_64 = pygame.image.load("64.jpg"); _64re = _64.get_rect()
_128 = pygame.image.load("128.jpg"); _128re = _128.get_rect()
_256 = pygame.image.load("256.jpg"); _256re = _256.get_rect()
_512 = pygame.image.load("512.jpg"); _512re = _512.get_rect()
_1024 = pygame.image.load("1024.jpg"); _1024re = _1024.get_rect()
_2048 = pygame.image.load("2048.jpg"); _2048re = _2048.get_rect()
figs = {2:(_2, _2re), 4:(_4,_4re), 8:(_8,_8re), 16:(_16,_16re),
32:(_32,_32re), 64:(_64,_64re), 128:(_128,_128re), 256:(_256,_256re),
512:(_512,_512re), 1024:(_1024,_1024re), 2048:(_2048,_2048re)}
def read_key(key):
# This method is only used when playing in console mode
if key == 'w':
juego.move(UP)
elif key == 's':
juego.move(DOWN)
elif key == 'a':
juego.move(LEFT)
elif key == 'd':
juego.move(RIGHT)
| RussianPenguin/dailyprogrammer | easy/140.py | Python | gpl-2.0 | 1,049 | 0.042898 |
import re
def readConversion():
string = ''
conversion = []
try:
# obtain conversion pattern
raw = raw_input()
while not raw:
raw = raw_input()
conversion = map(int, raw.split())
# obtain string
string = raw_input()
while not string:
string = raw_input()
finally:
return conversion, string
def converter(string, outForm, inputForm = -1):
inputFinder = {
-1: re.compile('(\S+)'),
0: re.compile('((?:^|[A-Z])[a-z]*)'),
1: re.compile('([^_]+)'),
2: re.compile('([^_]+)')
}
outputCompiler = {
0: lambda x: x[0].lower() + "".join(map(lambda x: x.lower().title(), x[1:])),
1: lambda x: "_".join(map(lambda x: x.lower(), x)),
2: lambda x: "_".join(map(lambda x: x.upper(), x))
}
inputData = inputFinder[inputForm].findall(string)
outputData = outputCompiler[outForm](inputData)
return outputData
if __name__ == '__main__':
while True:
conversion, string = readConversion()
if len(conversion) >= 1:
print (conversion[::-1])[0]
print converter(string, *(conversion[::-1]))
else:
break
| pollen/pyrobus | pyluos/modules/servo.py | Python | mit | 1,559 | 0 |
from __future__ import division
from .module import Module, interact
class Servo(Module):
def __init__(self, id, alias, device):
Module.__init__(self, 'Servo', id, alias, device)
self._max_angle = 180.0
self._min_pulse = 0.0005
self._max_pulse = 0.0015
self._angle = 0.0
@property
def rot_position(self):
return self._angle
@rot_position.setter
def rot_position(self, new_pos):
self._angle = new_pos
self._push_value('target_rot_position', new_pos)
@property
def max_angle(self):
return self._max_angle
@max_angle.setter
def max_angle(self, new):
self._max_angle = new
param = [self._max_angle, self._min_pulse, self._max_pulse]
self._push_value('parameters', param)
@property
def min_pulse(self):
return self._min_pulse
@min_pulse.setter
def min_pulse(self, new):
self._min_pulse = new
param = [self._max_angle, self._min_pulse, self._max_pulse]
self._push_value('parameters', param)
@property
def max_pulse(self):
return self._max_pulse
@max_pulse.setter
def max_pulse(self, new):
self._max_pulse = new
param = [self._max_angle, self._min_pulse, self._max_pulse]
self._push_value('parameters', param)
def _update(self, new_state):
Module._update(self, new_state)
def control(self):
def move(position):
self.position = position
return interact(move, position=(0, 180, 1))
| reeshupatel/demo | keystone/tests/test_revoke.py | Python | apache-2.0 | 17,990 | 0.000056 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
import mock
from keystone.common import dependency
from keystone import config
from keystone.contrib.revoke import model
from keystone import exception
from keystone.openstack.common import timeutils
from keystone import tests
from keystone.tests import test_backend_sql
CONF = config.CONF
def _new_id():
return uuid.uuid4().hex
def _future_time():
expire_delta = datetime.timedelta(seconds=1000)
future_time = timeutils.utcnow() + expire_delta
return future_time
def _past_time():
expire_delta = datetime.timedelta(days=-1000)
past_time = timeutils.utcnow() + expire_delta
return past_time
def _sample_blank_token():
issued_delta = datetime.timedelta(minutes=-2)
issued_at = timeutils.utcnow() + issued_delta
token_data = model.blank_token_data(issued_at)
return token_data
def _matches(event, token_values):
"""See if the token matches the revocation event.
Used as a secondary check on the logic to Check
By Tree Below: This is a brute force approach to checking.
Compare each attribute from the event with the corresponding
value from the token. If the event does not have a value for
the attribute, a match is still possible. If the event has a
value for the attribute, and it does not match the token, no match
is possible, so skip the remaining checks.
:param event one revocation event to match
:param token_values dictionary with set of values taken from the
token
:returns if the token matches the revocation event, indicating the
token has been revoked
"""
# The token has three attributes that can match the user_id
if event.user_id is not None:
for attribute_name in ['user_id', 'trustor_id', 'trustee_id']:
if event.user_id == token_values[attribute_name]:
break
else:
return False
# The token has two attributes that can match the domain_id
if event.domain_id is not None:
for attribute_name in ['user_domain_id', 'project_domain_id']:
if event.domain_id == token_values[attribute_name]:
break
else:
return False
# If any one check does not match, the whole token does
# not match the event. The numerous return False indicate
# that the token is still valid and short-circuits the
# rest of the logic.
attribute_names = ['project_id',
'expires_at', 'trust_id', 'consumer_id',
'access_token_id']
for attribute_name in attribute_names:
if getattr(event, attribute_name) is not None:
if (getattr(event, attribute_name) !=
token_values[attribute_name]):
return False
if event.role_id is not None:
roles = token_values['roles']
for role in roles:
if event.role_id == role:
break
else:
return False
if token_values['issued_at'] > event.issued_before:
return False
return True
@dependency.requires('revoke_api')
class RevokeTests(object):
def test_list(self):
self.revoke_api.revoke_by_user(user_id=1)
self.assertEqual(1, len(self.revoke_api.get_events()))
self.revoke_api.revoke_by_user(user_id=2)
self.assertEqual(2, len(self.revoke_api.get_events()))
def test_list_since(self):
self.revoke_api.revoke_by_user(user_id=1)
self.revoke_api.revoke_by_user(user_id=2)
past = timeutils.utcnow() - datetime.timedelta(seconds=1000)
self.assertEqual(2, len(self.revoke_api.get_events(past)))
future = timeutils.utcnow() + datetime.timedelta(seconds=1000)
self.assertEqual(0, len(self.revoke_api.get_events(future)))
def test_past_expiry_are_removed(self):
user_id = 1
self.revoke_api.revoke_by_expiration(user_id, _future_time())
self.assertEqual(1, len(self.revoke_api.get_events()))
event = model.RevokeEvent()
event.revoked_at = _past_time()
self.revoke_api.revoke(event)
self.assertEqual(1, len(self.revoke_api.get_events()))
@mock.patch.object(timeutils, 'utcnow')
def test_expired_events_removed_validate_token_success(self, mock_utcnow):
def _sample_token_values():
token = _sample_blank_token()
token['expires_at'] = timeutils.isotime(_future_time(),
subsecond=True)
return token
now = datetime.datetime.utcnow()
now_plus_2h = now + datetime.timedelta(hours=2)
mock_utcnow.return_value = now
# Build a token and validate it. This will seed the cache for the
# future 'synchronize' call.
token_values = _sample_token_values()
user_id = _new_id()
self.revoke_api.revoke_by_user(user_id)
token_values['user_id'] = user_id
self.assertRaises(exception.TokenNotFound,
self.revoke_api.check_token,
token_values)
# Move our clock forward by 2h, build a new token and validate it.
# 'synchronize' should now be exercised and remove old expired events
mock_utcnow.return_value = now_plus_2h
self.revoke_api.revoke_by_expiration(_new_id(), now_plus_2h)
# should no longer throw an exception
self.revoke_api.check_token(token_values)
class SqlRevokeTests(test_backend_sql.SqlTests, RevokeTests):
def config_overrides(self):
super(SqlRevokeTests, self).config_overrides()
self.config_fixture.config(
group='revoke',
driver='keystone.contrib.revoke.backends.sql.Revoke')
self.config_fixture.config(
group='token',
provider='keystone.token.providers.pki.Provider',
revoke_by_id=False)
class KvsRevokeTests(tests.TestCase, RevokeTests):
def config_overrides(self):
super(KvsRevokeTests, self).config_overrides()
self.config_fixture.config(
group='revoke',
driver='keystone.contrib.revoke.backends.kvs.Revoke')
self.config_fixture.config(
group='token',
provider='keystone.token.providers.pki.Provider',
revoke_by_id=False)
def setUp(self):
super(KvsRevokeTests, self).setUp()
self.load_backends()
class RevokeTreeTests(tests.TestCase):
def setUp(self):
super(RevokeTreeTests, self).setUp()
self.events = []
self.tree = model.RevokeTree()
self._sample_data()
def _sample_data(self):
user_ids = []
project_ids = []
role_ids = []
for i in range(0, 3):
user_ids.append(_new_id())
project_ids.append(_new_id())
role_ids.append(_new_id())
project_tokens = []
i = len(project_tokens)
project_tokens.append(_sample_blank_token())
project_tokens[i]['user_id'] = user_ids[0]
project_tokens[i]['project_id'] = project_ids[0]
project_tokens[i]['roles'] = [role_ids[1]]
i = len(project_tokens)
project_tokens.append(_sample_blank_token())
project_tokens[i]['user_id'] = user_ids[1]
project_tokens[i]['project_id'] = project_ids[0]
project_tokens[i]['roles'] = [role_ids[0]]
i = len(project_tokens)
project_tokens.append(_sample_blank_token())
project_tokens[i]['user_id'] = user_ids[0]
project_tokens[i]['project_id'] = project_ids[1]
project_tokens[i]['roles'] = [role_i
| nkgilley/home-assistant | homeassistant/components/toon/const.py | Python | apache-2.0 | 11,757 | 0 |
"""Constants for the Toon integration."""
from datetime import timedelta
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_CONNECTIVITY,
DEVICE_CLASS_PROBLEM,
)
from homeassistant.components.sensor import DEVICE_CLASS_POWER
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ICON,
ATTR_NAME,
ATTR_UNIT_OF_MEASUREMENT,
ENERGY_KILO_WATT_HOUR,
POWER_WATT,
UNIT_PERCENTAGE,
)
DOMAIN = "toon"
CONF_AGREEMENT = "agreement"
CONF_AGREEMENT_ID = "agreement_id"
CONF_CLOUDHOOK_URL = "cloudhook_url"
CONF_MIGRATE = "migrate"
DEFAULT_SCAN_INTERVAL = timedelta(seconds=300)
DEFAULT_MAX_TEMP = 30.0
DEFAULT_MIN_TEMP = 6.0
CURRENCY_EUR = "EUR"
VOLUME_CM3 = "CM3"
VOLUME_M3 = "M3"
ATTR_DEFAULT_ENABLED = "default_enabled"
ATTR_INVERTED = "inverted"
ATTR_MEASUREMENT = "measurement"
ATTR_SECTION = "section"
BINARY_SENSOR_ENTITIES = {
"thermostat_info_boiler_connected_None": {
ATTR_NAME: "Boiler Module Connection",
ATTR_SECTION: "thermostat",
ATTR_MEASUREMENT: "boiler_module_connected",
ATTR_INVERTED: False,
ATTR_DEVICE_CLASS: DEVICE_CLASS_CONNECTIVITY,
ATTR_ICON: "mdi:check-network-outline",
ATTR_DEFAULT_ENABLED: False,
},
"thermostat_info_burner_info_1": {
ATTR_NAME: "Boiler Heating",
ATTR_SECTION: "thermostat",
ATTR_MEASUREMENT: "heating",
ATTR_INVERTED: False,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:fire",
ATTR_DEFAULT_ENABLED: False,
},
"thermostat_info_burner_info_2": {
ATTR_NAME: "Hot Tap Water",
ATTR_SECTION: "thermostat",
ATTR_MEASUREMENT: "hot_tapwater",
ATTR_INVERTED: False,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:water-pump",
ATTR_DEFAULT_ENABLED: True,
},
"thermostat_info_burner_info_3": {
ATTR_NAME: "Boiler Preheating",
ATTR_SECTION: "thermostat",
ATTR_MEASUREMENT: "pre_heating",
ATTR_INVERTED: False,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:fire",
ATTR_DEFAULT_ENABLED: False,
},
"thermostat_info_burner_info_None": {
ATTR_NAME: "Boiler Burner",
ATTR_SECTION: "thermostat",
ATTR_MEASUREMENT: "burner",
ATTR_INVERTED: False,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:fire",
ATTR_DEFAULT_ENABLED: True,
},
"thermostat_info_error_found_255": {
ATTR_NAME: "Boiler Status",
ATTR_SECTION: "thermostat",
ATTR_MEASUREMENT: "error_found",
ATTR_INVERTED: False,
ATTR_DEVICE_CLASS: DEVICE_CLASS_PROBLEM,
ATTR_ICON: "mdi:alert",
ATTR_DEFAULT_ENABLED: True,
},
"thermostat_info_ot_communication_error_0": {
ATTR_NAME: "OpenTherm Connection",
ATTR_SECTION: "thermostat",
ATTR_MEASUREMENT: "opentherm_communication_error",
ATTR_INVERTED: False,
ATTR_DEVICE_CLASS: DEVICE_CLASS_PROBLEM,
ATTR_ICON: "mdi:check-network-outline",
ATTR_DEFAULT_ENABLED: False,
},
"thermostat_program_overridden": {
ATTR_NAME: "Thermostat Program Override",
ATTR_SECTION: "thermostat",
ATTR_MEASUREMENT: "program_overridden",
ATTR_INVERTED: False,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:gesture-tap",
ATTR_DEFAULT_ENABLED: True,
},
}
SENSOR_ENTITIES = {
"gas_average": {
ATTR_NAME: "Average Gas Usage",
ATTR_SECTION: "gas_usage",
ATTR_MEASUREMENT: "average",
ATTR_UNIT_OF_MEASUREMENT: VOLUME_CM3,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:gas-cylinder",
ATTR_DEFAULT_ENABLED: True,
},
"gas_average_daily": {
ATTR_NAME: "Average Daily Gas Usage",
ATTR_SECTION: "gas_usage",
ATTR_MEASUREMENT: "day_average",
ATTR_UNIT_OF_MEASUREMENT: VOLUME_M3,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:gas-cylinder",
ATTR_DEFAULT_ENABLED: False,
},
"gas_daily_usage": {
ATTR_NAME: "Gas Usage Today",
ATTR_SECTION: "gas_usage",
ATTR_MEASUREMENT: "day_usage",
ATTR_UNIT_OF_MEASUREMENT: VOLUME_M3,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:gas-cylinder",
ATTR_DEFAULT_ENABLED: True,
},
"gas_daily_cost": {
ATTR_NAME: "Gas Cost Today",
ATTR_SECTION: "gas_usage",
ATTR_MEASUREMENT: "day_cost",
ATTR_UNIT_OF_MEASUREMENT: CURRENCY_EUR,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:gas-cylinder",
ATTR_DEFAULT_ENABLED: True,
},
"gas_meter_reading": {
ATTR_NAME: "Gas Meter",
ATTR_SECTION: "gas_usage",
ATTR_MEASUREMENT: "meter",
ATTR_UNIT_OF_MEASUREMENT: VOLUME_M3,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:gas-cylinder",
ATTR_DEFAULT_ENABLED: False,
},
"gas_value": {
ATTR_NAME: "Current Gas Usage",
ATTR_SECTION: "gas_usage",
ATTR_MEASUREMENT: "current",
ATTR_UNIT_OF_MEASUREMENT: VOLUME_CM3,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:gas-cylinder",
ATTR_DEFAULT_ENABLED: True,
},
"power_average": {
ATTR_NAME: "Average Power Usage",
ATTR_SECTION: "power_usage",
ATTR_MEASUREMENT: "average",
ATTR_UNIT_OF_MEASUREMENT: POWER_WATT,
ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER,
ATTR_ICON: "mdi:power-plug",
ATTR_DEFAULT_ENABLED: False,
},
"power_average_daily": {
ATTR_NAME: "Average Daily Energy Usage",
ATTR_SECTION: "power_usage",
ATTR_MEASUREMENT: "day_average",
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:power-plug",
ATTR_DEFAULT_ENABLED: False,
},
"power_daily_cost": {
ATTR_NAME: "Energy Cost Today",
ATTR_SECTION: "power_usage",
ATTR_MEASUREMENT: "day_cost",
ATTR_UNIT_OF_MEASUREMENT: CURRENCY_EUR,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:power-plug",
ATTR_DEFAULT_ENABLED: True,
},
"power_daily_value": {
ATTR_NAME: "Energy Usage Today",
ATTR_SECTION: "power_usage",
ATTR_MEASUREMENT: "day_usage",
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:power-plug",
ATTR_DEFAULT_ENABLED: True,
},
"power_meter_reading": {
ATTR_NAME: "Electricity Meter Feed IN Tariff 1",
ATTR_SECTION: "power_usage",
ATTR_MEASUREMENT: "meter_high",
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:power-plug",
ATTR_DEFAULT_ENABLED: False,
},
"power_meter_reading_low": {
ATTR_NAME: "Electricity Meter Feed IN Tariff 2",
ATTR_SECTION: "power_usage",
ATTR_MEASUREMENT: "meter_high",
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:power-plug",
ATTR_DEFAULT_ENABLED: False,
},
"power_value": {
ATTR_NAME: "Current Powe
|
r Usage",
ATTR_SECTION: "power_usage",
ATTR_MEASUREMENT: "current",
ATTR_UNIT_OF_MEASUREMENT: POWER_WATT,
ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER,
ATTR_ICON: "mdi:power-plug",
ATTR_DEFAULT_ENABLED: True,
},
"solar_meter_reading_produced": {
ATTR_NAME: "Electricity Meter Feed OUT Tariff 1",
ATTR_SECTION: "power_usage",
ATTR_MEASUREMENT: "meter_produced_high",
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:power-plug",
ATTR_DEFAULT_ENABLED: False,
},
"solar_meter_reading_low_produced": {
ATTR_NAME: "Electricity Meter Feed OUT Tariff 2",
ATTR_SECTION: "power_usage",
ATTR_MEASUREMENT: "meter_produced_low",
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:power-plug",
ATTR_DEFAULT_ENABLED: False,
},
"solar_value": {
ATTR_NAME:
| archesproject/arches | tests/models/mapped_csv_import_tests.py | Python | agpl-3.0 | 6,782 | 0.003096 |
"""
ARCHES - a program developed to inventory and manage immovable cultural heritage.
Copyright (C) 2013 J. Paul Getty Trust and World Monuments Fund
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
from operator import itemgetter
from tests import test_settings
from tests.base_test import ArchesTestCase
from django.core import management
from arches.app.models.models import TileModel, ResourceInstance
from arches.app.models.concept import Concept
from arches.app.utils.betterJSONSerializer import JSONSerializer, JSONDeserializer
from arches.app.utils.skos import SKOSReader
from arches.app.search.search_engine_factory import SearchEngineFactory
from arches.app.utils.data_management.resource_graphs.importer import import_graph as ResourceGraphImporter
from arches.app.utils.data_management.resources.importer import BusinessDataImporter
# these tests can be run from the command line via
# python manage.py test tests/models/mapped_csv_import_tests.py --pattern="*.py" --settings="tests.test_settings"
class mappedCSVFileImportTests(ArchesTestCase):
@classmethod
def setUpClass(cls):
pass
def setUp(self):
ResourceInstance.objects.all().delete()
skos = SKOSReader()
rdf = skos.read_file("tests/fixtures/data/concept_label_test_scheme.xml")
ret = skos.save_concepts_from_skos(rdf)
skos = SKOSReader()
rdf = skos.read_file("tests/fixtures/data/concept_label_test_collection.xml")
ret = skos.save_concepts_from_skos(rdf)
with open(os.path.join("tests/fixtures/data/json/cardinality_test_data/target.json"), "rU") as f:
archesfile = JSONDeserializer().deserialize(f)
ResourceGraphImporter(archesfile["graph"])
with open(os.path.join("tests/fixtures/data/json/cardinality_test_data/file-list.json"), "rU") as f:
archesfile = JSONDeserializer().deserialize(f)
ResourceGraphImporter(archesfile["graph"])
@classmethod
def tearDownClass(cls):
pass
def test_single_1(self):
og_tile_count = TileModel.objects.count()
BusinessDataImporter("tests/fixtures/data/csv/cardinality_test_data/single-1_to_1.csv").import_business_data()
new_tile_count = TileModel.objects.count()
tile_difference = new_tile_count - og_tile_count
self.assertEqual(tile_difference, 1)
def test_single_n_to_n(self):
og_tile_count = TileModel.objects.count()
BusinessDataImporter("tests/fixtures/data/csv/cardinality_test_data/single-n_to_n.csv").import_business_data()
new_tile_count = TileModel.objects.count()
tile_difference = new_tile_count - og_tile_count
self.assertEqual(tile_difference, 2)
def test_single_n_to_1(self):
og_tile_count = TileModel.objects.count()
BusinessDataImporter("tests/fixtures/data/csv/cardinality_test_data/single-n_to_1.csv").import_business_data()
new_tile_count = TileModel.objects.count()
tile_difference = new_tile_count - og_tile_count
self.assertEqual(tile_difference, 1)
def test_1_1(self):
og_tile_count = TileModel.objects.count()
BusinessDataImporter("tests/fixtures/data/csv/cardinality_test_data/1-1.csv").import_business_data()
new_tile_count = TileModel.objects.count()
tile_difference = new_tile_count - og_tile_count
self.assertEqual(tile_difference, 2)
def test_1_n(self):
og_tile_count = TileModel.objects.count()
BusinessDataImporter("tests/fixtures/data/csv/cardinality_test_data/1-n.csv").import_business_data()
new_tile_count = TileModel.objects.count()
tile_difference = new_tile_count - og_tile_count
self.assertEqual(tile_difference, 3)
def test_n_1(self):
og_tile_count = TileModel.objects.count()
BusinessDataImporter("tests/fixtures/data/csv/cardinality_test_data/n-1.csv").import_business_data()
new_tile_count = TileModel.objects.count()
tile_difference = new_tile_count - og_tile_count
self.assertEqual(tile_difference, 4)
def test_n_n(self):
og_tile_count = TileModel.objects.count()
BusinessDataImporter("tests/fixtures/data/csv/cardinality_test_data/n-n.csv").import_business_data()
new_tile_count = TileModel.objects.count()
tile_difference = new_tile_count - og_tile_count
self.assertEqual(tile_difference, 6)
def test_domain_label_import(self):
og_tile_count = TileModel.objects.count()
BusinessDataImporter("tests/fixtures/data/csv/domain_label_import.csv").import_business_data()
new_tile_count = TileModel.objects.count()
tile_difference = new_tile_count - og_tile_count
self.assertEqual(tile_difference, 1)
def test_concept_label_import(self):
og_tile_count = TileModel.objects.count()
BusinessDataImporter("tests/fixtures/data/csv/concept_label_import.csv").import_business_data()
new_tile_count = TileModel.objects.count()
tile_difference = new_tile_count - og_tile_count
self.assertEqual(tile_difference, 1)
def test_required_node_import(self):
og_tile_count = TileModel.objects.count()
BusinessDataImporter("tests/fixtures/data/csv/required_node_import.csv").import_business_data()
new_tile_count = TileModel.objects.count()
tile_difference = new_tile_count - og_tile_count
self.assertEqual(tile_difference, 0)
def test_required_child_node_import(self):
og_tile_count = TileModel.objects.count()
BusinessDataImporter("tests/fixtures/data/csv/required_child_node_import.csv").import_business_data()
new_tile_count = TileModel.objects.count()
tile_difference = new_tile_count - og_tile_count
self.assertEqual(tile_difference, 0)
def test_file_list_datatype_import(self):
og_tile_count = TileModel.objects.count()
BusinessDataImporter("tests/fixtures/data/csv/file_list_datatype_import.csv").import_business_data()
new_tile_count = TileModel.objects.count()
tile_difference = new_tile_count - og_tile_count
self.assertEqual(tile_difference, 1)
| mindnervestech/mnrp | openerp/service/server.py | Python | agpl-3.0 | 36,292 | 0.002618 |
#-----------------------------------------------------------
# Threaded, Gevent and Prefork Servers
#-----------------------------------------------------------
import datetime
import errno
import logging
import os
import os.path
import platform
import psutil
import random
if os.name == 'posix':
import resource
else:
resource = None
import select
import signal
import socket
import subprocess
import sys
import threading
import time
import unittest2
import werkzeug.serving
try:
import fcntl
except ImportError:
pass
try:
from setproctitle import setproctitle
except ImportError:
setproctitle = lambda x: None
import openerp
from openerp.modules.registry import RegistryManager
from openerp.release import nt_service_name
import openerp.tools.config as config
from openerp.tools.misc import stripped_sys_argv, dumpstacks
_logger = logging.getLogger(__name__)
SLEEP_INTERVAL = 60 # 1 min
#----------------------------------------------------------
# Werkzeug WSGI servers patched
#----------------------------------------------------------
class LoggingBaseWSGIServerMixIn(object):
def handle_error(self, request, client_address):
t, e, _ = sys.exc_info()
if t == socket.error and e.errno == errno.EPIPE:
# broken pipe, ignore error
return
_logger.exception('Exception happened during processing of request from %s', client_address)
class BaseWSGIServerNoBind(LoggingBaseWSGIServerMixIn, werkzeug.serving.BaseWSGIServer):
""" werkzeug Base WSGI Server patched to skip socket binding. PreforkServer
use this class, sets the socket and calls the process_request() manually
"""
def __init__(self, app):
werkzeug.serving.BaseWSGIServer.__init__(self, "1", "1", app)
def server_bind(self):
# we don't bind because we use the listen socket of PreforkServer#socket
# instead we close the socket
if self.socket:
self.socket.close()
def server_activate(self):
# dont listen as we use PreforkServer#socket
pass
class RequestHandler(werkzeug.serving.WSGIRequestHandler):
def setup(self):
# flag the current thread as handling a http request
super(RequestHandler, self).setup()
me = threading.currentThread()
me.name = 'openerp.service.http.request.%s' % (me.ident,)
# _reexec() should set LISTEN_* to avoid connection refused during reload time. It
# should also work with systemd socket activation. This is currently untested
# and not yet used.
class ThreadedWSGIServerReloadable(LoggingBaseWSGIServerMixIn, werkzeug.serving.ThreadedWSGIServer):
""" werkzeug Threaded WSGI Server patched to allow reusing a listen socket
given by the environment, this is used by autoreload to keep the listen
socket open when a reload happens.
"""
def __init__(self, host, port, app):
super(ThreadedWSGIServerReloadable, self).__init__(host, port, app,
handler=RequestHandler)
def server_bind(self):
envfd = os.environ.get('LISTEN_FDS')
if envfd and os.environ.get('LISTEN_PID') == str(os.getpid()):
self.reload_socket = True
self.socket = socket.fromfd(int(envfd), socket.AF_INET, socket.SOCK_STREAM)
# should we os.close(int(envfd)) ? it seem python duplicate the fd.
else:
self.reload_socket = False
super(ThreadedWSGIServerReloadable, self).server_bind()
def server_activate(self):
if not self.reload_socket:
super(ThreadedWSGIServerReloadable, self).server_activate()
#----------------------------------------------------------
# AutoReload watcher
#----------------------------------------------------------
class AutoReload(object):
def __init__(self, server):
self.server = server
self.files = {}
self.modules = {}
import pyinotify
class EventHandler(pyinotify.ProcessEvent):
def __init__(self, autoreload):
self.autoreload = autoreload
def process_IN_CREATE(self, event):
_logger.debug('File created: %s', event.pathname)
self.autoreload.files[event.pathname] = 1
def process_IN_MODIFY(self, event):
_logger.debug('File modified: %s', event.pathname)
self.autoreload.files[event.pathname] = 1
self.wm = pyinotify.WatchManager()
self.handler = EventHandler(self)
self.notifier = pyinotify.Notifier(self.wm, self.handler, timeout=0)
mask = pyinotify.IN_MODIFY | pyinotify.IN_CREATE # IN_MOVED_FROM, IN_MOVED_TO ?
for path in openerp.modules.module.ad_paths:
_logger.info('Watching addons folder %s', path)
self.wm.add_watch(path, mask, rec=True)
def process_data(self, files):
xml_files = [i for i in files if i.endswith('.xml')]
for i in xml_files:
for path in openerp.modules.module.ad_paths:
if i.startswith(path):
# find out which addons path the file belongs to
# and extract its module name
right = i[len(path) + 1:].split('/')
if len(right) < 2:
continue
module = right[0]
self.modules[module] = 1
if self.modules:
_logger.info('autoreload: xml change detected, autoreload activated')
restart()
def process_python(self, files):
# process python changes
py_files = [i for i in files if i.endswith('.py')]
py_errors = []
# TODO keep python errors until they are ok
if py_files:
for i in py_files:
try:
source = open(i, 'rb').read() + '\n'
compile(source, i, 'exec')
except SyntaxError:
py_errors.append(i)
if py_errors:
_logger.info('autoreload: python code change detected, errors found')
for i in py_errors:
_logger.info('autoreload: SyntaxError %s', i)
else:
_logger.info('autoreload: python code updated, autoreload activated')
restart()
def check_thread(self):
# Check if some files have been touched in the addons path.
# If true, check if the touched file belongs to an installed module
# in any of the database used in the registry manager.
while 1:
while self.notifier.check_events(1000):
self.notifier.read_events()
self.notifier.process_events()
l = self.files.keys()
self.files.clear()
self.process_data(l)
self.process_python(l)
def run(self):
t = threading.Thread(target=self.check_thread)
t.setDaemon(True)
t.start()
_logger.info('AutoReload watcher running')
#----------------------------------------------------------
# Servers: Threaded, Gevented and Prefork
#----------------------------------------------------------
class CommonServer(object):
def __init__(self, app):
# TODO Change the xmlrpc_* options to http_*
self.app = app
# config
self.interface = config['xmlrpc_interface'] or '0.0.0.0'
self.port = config['xmlrpc_port']
# runtime
self.pid = os.getpid()
def close_socket(self, sock):
""" Closes a socket instance cleanly
:param sock: the network socket to close
:type sock: socket.socket
"""
try:
sock.shutdown(socket.SHUT_RDWR)
except socket.error, e:
# On OSX, socket shutdowns both sides if any side closes it
# causing an error 57 'Socket is not connected' on shutdown
# of the other side (or something), see
# http://bugs.python.org/issue4397
# note: stdlib fixed test, not behavior
if e.errno != errno.ENOTCONN or platform.system() not in ['Darwin', 'Windows']:
| Rostlab/nalaf | nalaf/features/relations/__init__.py | Python | apache-2.0 | 10,867 | 0.005245 |
import abc
from nalaf.features import FeatureGenerator
import re
from nalaf import print_debug, print_verbose
class EdgeFeatureGenerator(FeatureGenerator):
"""
Abstract class for generating features for each edge in the dataset.
Subclasses that inherit this class should:
* Be named [Name]FeatureGenerator
* Implement the abstract method generate
* Append new items to the dictionary field "features" of each Edge in the dataset
"""
@abc.abstractmethod
def generate(self, dataset, feature_set, use_gold=True, use_pred=False):
"""
:type dataset: nalaf.structures.data.Dataset
"""
pass
def add_to_feature_set(self, feature_set, edge, feature_name, value=1):
"""
Return True if feature was added to feature_set. False, otherwise
If the feature_name is None, the feature is not added in any case. See: self.mk_feature_name
"""
if feature_name is None:
return False
else:
feature_name = self.__set_final_name(feature_name)
if not feature_set.is_locked:
feature_index = feature_set.get(feature_name, None)
if feature_index is None:
feature_index = len(feature_set)
feature_set[feature_name] = feature_index
print_verbose("Feature map: {} == {} -- _1st_ value: {}".format(str(feature_index), feature_name, str(value)))
edge.features[feature_index] = value
return True
else:
feature_index = feature_set.get(feature_name, None)
if feature_index is not None:
edge.features[feature_index] = value
return True
else:
return False
def __set_final_name(self, feature_name):
if not re.search('\[-?[0-9]+\]$', feature_name):
# Identify the window position --> TODO likely deletable from a edge feature generator
feature_name = feature_name + "_[0]"
if not feature_name.startswith(self.__class__.__name__):
feature_name = self.__class__.__name__ + "::" + feature_name
return feature_name
def mk_feature_name(self, prefix, *args):
if prefix is None:
return None
else:
l = [str(x) for x in ([prefix] + list(args))]
return "_".join(l)
def gen_prefix_feat_name(self, field_prefix_feature, *args):
prefix = self.__getattribute__(field_prefix_feature)
pure_name = field_prefix_feature[field_prefix_feature.find("_") + 1:] # Remove "prefix_"
feature_name = self.mk_feature_name(prefix, pure_name, *args)
# print_debug(feature_name, field_prefix_feature, args)
return feature_name
def add(self, feature_set, edge, field_prefix_feature, *args):
feature_name = self.gen_prefix_feat_name(field_prefix_feature, *args)
self.add_to_feature_set(feature_set, edge, feature_name)
def add_with_value(self, feature_set, edge, field_prefix_feature, value, *args):
feature_name = self.gen_prefix_feat_name(field_prefix_feature, *args)
self.add_to_feature_set(feature_set, edge, feature_name, value=value)
from nalaf.features.relations import EdgeFeatureGenerator
from nltk.stem import PorterStemmer
from math import log2
from operator import itemgetter
class TokenFeatureGenerator(EdgeFeatureGenerator):
"""
Token based features for each entity belonging to an edge
"""
def __init__(
self,
prefix_txt=None, # 73 in relna
prefix_pos=None, # 74
prefix_masked_txt=None, # 75
prefix_stem_masked_txt=None, # 76
prefix_ann_type=None, # 77
):
self.stemmer = PorterStemmer()
"""an instance of the PorterStemmer()"""
self.prefix_txt = prefix_txt
self.prefix_pos = prefix_pos
self.prefix_masked_txt = prefix_masked_txt
self.prefix_stem_masked_txt = prefix_stem_masked_txt
self.prefix_ann_type = prefix_ann_type
@abc.abstractmethod
def generate(self, dataset, feature_set, is_training_mode):
"""
Does nothing directly
"""
pass
def token_features(self, token, addendum, edge, feature_set, is_training_mode):
feature_name_1 = self.gen_prefix_feat_name("prefix_txt", addendum, token.word)
self.add_to_feature_set(feature_set, edge, feature_name_1)
feature_name_2 = self.gen_prefix_feat_name("prefix_pos", addendum, token.features['pos'])
self.add_to_feature_set(feature_set, edge, feature_name_2)
feature_name_3 = self.gen_prefix_feat_name("prefix_masked_txt", addendum, token.masked_text(edge.same_part))
self.add_to_feature_set(feature_set, edge, feature_name_3)
# TODO why stem of masked text? -- makes little sense -- See TODO in original loctext too
feature_name_4 = self.gen_prefix_feat_name("prefix_stem_masked_txt", addendum, self.stemmer.stem(token.masked_text(edge.same_part)))
self.add_to_feature_set(feature_set, edge, feature_name_4)
ann_types = self.annotated_types(token, edge)
for ann in ann_types:
feature_name_5 = self.gen_prefix_feat_name("prefix_ann_type", addendum, ann)
self.add_to_feature_set(feature_set, edge, feature_name_5)
def annotated_types(self, token, edge):
head1 = edge.entity1.head_token
head2 = edge.entity2.head_token
if not token.is_entity_part(edge.same_part):
feature_name = 'no_ann_type'
return [feature_name]
else:
ann_types = []
if token.is_entity_part(edge.same_part):
entity = token.get_entity(edge.same_part)
feature_name_1 = entity.class_id
ann_types.append(feature_name_1)
if entity == edge.entity1:
feature_name_2 = 'entity1_' + edge.entity1.class_id
ann_types.append(feature_name_2)
return ann_types
elif entity == edge.entity2:
feature_name_2 = 'entity2_' + edge.entity2.class_id
ann_types.append(feature_name_2)
return ann_types
return ann_types
def calculateInformationGain(feature_set, dataset, output_file):
number_pos_instances = 0
number_neg_instances = 0
for edge in dataset.edges():
if edge.real_target == +1:
number_pos_instances += 1
else:
number_neg_instances += 1
number_total_instances = number_pos_instances + number_neg_instances
percentage_pos_instances = number_pos_instances / number_total_instances
percentage_neg_instances = number_neg_instances / number_total_instances
first_ent_component = -1 * (percentage_pos_instances * log2(percentage_pos_instances) + percentage_neg_instances * log2(percentage_neg_instances))
feature_list = []
for key, value in feature_set.items():
feature_present_in_pos = 0
feature_present_in_neg = 0
feature_absent_in_pos = 0
feature_absent_in_neg = 0
total_feature_present = 0
total_feature_absent = 0
for edge in dataset.edges():
if edge.real_target == +1:
if value in edge.features.keys():
feature_present_in_pos += 1
total_feature_present += 1
else:
feature_absent_in_pos += 1
total_feature_absent +=1
if edge.real_target == -1:
if value in edge.features.keys():
feature_present_in_neg += 1
total_feature_present += 1
else:
feature_absent_in_neg += 1
total_feature_absent += 1
percentage_pos_given_feature = 0
percentage_neg_given_feature = 0
if (total_feature_present > 0):
percentage_pos_given_feature = feature_present_in_pos / total_feature_present
percentage_ne
| Edraak/edraak-platform | lms/djangoapps/course_api/helpers.py | Python | agpl-3.0 | 2,431 | 0.002879 |
from urlparse import urljoin
import requests
from django.conf import settings
from edxmako.shortcuts import marketing_link
import logging
from django.core.cache import cache
import json
log = logging.getLogger(__name__)
def is_marketing_api_enabled():
"""
Checks if the feature is enabled, while making some sanity checks along the way!
"""
if not settings.FEATURES.get('EDRAAK_USE_MARKETING_COURSE_DETAILS_API'):
return False
base_message = 'You have enabled the `EDRAAK_USE_MARKETING_COURSE_DETAILS_API` feature'
if not settings.FEATURES.get('ENABLE_MKTG_SITE'):
raise Exception('{base} {other}'.format(
base=base_message,
other='without enabling the marketing site. Please enable the latter with the '
'`ENABLE_MKTG_SITE` feature flag.',
))
mktg_urls_message = '{base} {other}'.format(
base=base_message,
other='but did not configure either COURSE_DETAILS_API_FORMAT or ROOT in the MKTG_URLS.',
)
try:
if not settings.MKTG_URLS['ROOT'] or not settings.MKTG_URLS['COURSE_DETAILS_API_FORMAT']:
raise Exception(mktg_urls_message)
except KeyError:
raise Exception(mktg_urls_message)
if '{course_id}' not in settings.MKTG_URLS['COURSE_DETAILS_API_FORMAT']:
raise Exception('{base} {other}'.format(
base=base_message,
other='but COURSE_DETAILS_API_FORMAT does not contain the formatting argument `course_id`.',
))
return True
def get_marketing_data(course_key, language):
"""
This method gets the current marketing details for a specific
course.
:returns the course details from the marketing API or None if
no marketing details found.
"""
CACHE_KEY = "MKTG_API_" + str(course_key) + str(language)
if cache.get(CACHE_KEY):
return cache.get(CACHE_KEY)
marketing_root_format = marketing_link('COURSE_DETAILS_API_FORMAT')
url = marketing_root_format.format(course_id=course_key)
response = requests.get(url=url, headers={
'Accept-Language': language,
})
if response.status_code != 200:
log.warning('Could not fetch the marketing details from the API. course_key=[%s], status_code=[%s], url=[%s].',
course_key, response.status_code, url)
return {}
cache.set(CACHE_KEY, response.json(), 30 * 60)
return response.json()
|
IotaMyriad/SmartMirror
|
Widgets/InstalledWidgets/WeatherWidget/ExpandedWeatherWidget.py
|
Python
|
gpl-3.0
| 3,751
| 0.005335
|
# user: smartmirror_elp
# pw: 12345678
# API-key: 68a61abe6601c18b8288c0e133ccaafb
import os
import pyowm
import datetime
from datetime import datetime
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from Widgets.ExpandedWidget import ExpandedWidget
API_key = "68a61abe6601c18b8288c0e133ccaafb"
place = "Toronto,Ca"
tor_lat = 43.6532
tor_long = -79.3832
# datetime.weekday() counts Monday as 0, so index the list accordingly
day_of_week = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
class DailyWeather(QWidget):
date = ""
def __init__(self, date):
super(DailyWeather, self).__init__()
self.date = date
self.initUI()
self.startTimer()
def parse(self, string, start, end):
strlist = string.split(start)
ret = strlist[1].split(end)[0]
ret = ret.split('}')[0]
return ret
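    # parse() pulls a value out of the str() of a dict, e.g.
    # parse("{'min': 1.2, 'max': 3.4}", "'min': ", ",") -> "1.2"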
def initUI(self):
self.layout = QWidget(self)
self.vbox = QVBoxLayout(self)
self.lbl1 = QLabel(self)
self.lbl1.setStyleSheet("color : white")
date = datetime.strptime(self.date, "%Y-%m-%d %H:%M:%S+00")
self.lbl1.setText(day_of_week[date.weekday()])
self.vbox.addWidget(self.lbl1)
self.lblp = QLabel(self)
self.lblp.setScaledContents(True)
self.vbox.addWidget(self.lblp)
self.lbl2 = QLabel(self)
self.lbl2.setStyleSheet("color : white")
self.vbox.addWidget(self.lbl2)
self.lbl3 = QLabel(self)
self.lbl3.setStyleSheet("color : white")
self.vbox.addWidget(self.lbl3)
self.lbl4 = QLabel(self)
self.lbl4.setStyleSheet("color : white")
self.vbox.addWidget(self.lbl4)
self.setWindowTitle('Weather')
self.layout.setLayout(self.vbox)
self.Update()
def startTimer(self):
self.timer = QTimer(self)
self.timer.setInterval(5000)
self.timer.timeout.connect(self.Update)
self.timer.start()
def Update(self):
owm = pyowm.OWM(API_key)
fc = owm.daily_forecast(place)
f = fc.get_forecast()
w = f.get_weathers()[0]
for weather in f:
if self.date == weather.get_reference_time('iso'):
w = weather
break
daily = w.get_temperature('celsius')
temp_min = float(self.parse(str(daily), "'min': ", ","))
temp_max = float(self.parse(str(daily), "'max': ", ","))
status = w.get_status()
self.lblp.setPixmap(QPixmap(os.getcwd() + "/Widgets/InstalledWidgets/WeatherWidget/weather_icons/Expanded/" + w.get_weather_icon_name()))
self.lbl2.setText("status: " + str(status) + " ")
self.lbl3.setText("daily min: " + str(int(round(temp_min))) + "°C ")
self.lbl4.setText("daily max: " + str(int(round(temp_max))) + "°C ")
class ExpandedWeatherWidget(ExpandedWidget):
def __init__(self):
super(ExpandedWeatherWidget, self).__init__()
self.initUI()
def initUI(self):
self.layout = QGridLayout()
self.widget = QWidget()
        self.widget.setStyleSheet("background-color:black;")
        owm = pyowm.OWM(API_key)  # You MUST provide a valid API key
fc = owm.daily_forecast(place)
f = fc.get_forecast()
i = 0
weather = f.get_weathers()[0]
for weather in f:
day = DailyWeather(weather.get_reference_time('iso'))
day.setStyleSheet("background-color:black;");
|
self.layout.addWidget(day,0,i)
i += 1
#self.layout.addWidget(self.widget)
self.setLayout(self.layout)
@staticmethod
def name():
return "WeatherWidget"
|
kdart/pycopia
|
QA/pycopia/QA/jobrunner.py
|
Python
|
apache-2.0
| 5,084
| 0.00118
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An interface for running test cases as unattended jobs.
"""
import sys
import os
from pycopia import logging
from pycopia import aid
from pycopia import shparser
from pycopia import getopt
from pycopia.QA import testloader
from pycopia.db import models
def _parse_parameters(text):
def _ParserCB(d, argv):
for param in argv:
if param.find("=") > 0:
key, value = param.split("=", 1)
d[key] = value
d = {}
p = shparser.ShellParser(aid.partial(_ParserCB, d))
p.feed(text)
p.feed("\n")
return d
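# e.g. _parse_parameters("timeout=30 dut=lab1") -> {'timeout': '30', 'dut': 'lab1'}
# (the parameter names above are only illustrative)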
def get_test_jobs(args):
dbsession = models.get_session()
TJ = models.TestJob
for jobid in args:
try:
jobid = int(jobid)
except ValueError:
pass
try:
if type(jobid) is int:
testjob = dbsession.query(TJ).get(jobid)
else:
testjob = dbsession.query(TJ).filter(TJ.name==jobid).one()
except models.NoResultFound:
logging.warn("No TestJob with id %r" % jobid)
continue
else:
yield testjob
JobRunnerInterfaceDoc = r"""
Invoke a test job (TestJob object) from a shell.
Test jobs encapsulate a test suite, environment, parameters, user, and
report. Therefore these things are not supplied to this interface.
However, some shared configuration parameters may be supplied as long
options to this job runner.
A job is selected by its ID number, or its unique name.
Only automated, non-interactive tests should be added to a suite run by
a test job.
Often run from cron.
Usage:
%s [-h?] arg...
Where the arguments are job names or job id.
Options:
-h -- Print help text and return.
Long-style options are passed into the test suite configuration.
"""
class JobRunnerInterface(object):
def __init__(self, testrunner):
self.runner = testrunner
cf = self.runner.config
cf.flags.DEBUG = 0
cf.flags.VERBOSE = 0
cf.flags.INTERACTIVE = False
cf.userinterfacetype = "none"
def __call__(self, argv):
"""Invoke the job runner by calling it with argument list.
"""
cf = self.runner.config
optlist, extraopts, args = getopt.getopt(argv[1:], "h?")
for opt, optarg in optlist:
if opt in ("-h", "-?"):
print JobRunnerInterfaceDoc % (os.path.basename(argv[0]),)
return
cf.evalupdate(extraopts)
self.runner.set_options(extraopts)
for testjob in get_test_jobs(args):
if testjob is None:
continue
cf.environmentname = testjob.environment.name
if self.is_job_running(testjob.id):
continue
self.create_job_lock(testjob.id)
try:
if testjob.parameters:
params = _parse_parameters(testjob.parameters)
                    cf.arguments = testjob.parameters.split()
                else:
                    params = {}
cf.argv = [testjob.suite.name]
cf.comment = "Automated test job %s(%s)." % (testjob.name, testjob.id)
cf.reportname = testjob.reportname
cf.evalupdate(params)
self.runner.set_options(params)
suite = get_suite(testjob.suite, cf)
self.runner.initialize()
self.runner.run_object(suite)
self.runner.finalize()
finally:
self.remove_job_lock(testjob.id)
def create_job_lock(self, jobid):
lf = self._get_job_lockfile(jobid)
open(lf, "w").close()
def remove_job_lock(self, jobid):
lf = self._get_job_lockfile(jobid)
os.unlink(lf)
def is_job_running(self, jobid):
lf = self._get_job_lockfile(jobid)
return os.path.exists(lf)
def _get_job_lockfile(self, jobid):
envname = self.runner.config.environmentname
return "/var/tmp/testjob_{0}_{1}.lock".format(envname, jobid)
def get_suite(dbsuite, config):
suite = testloader.get_suite(dbsuite, config)
for dbtestcase in dbsuite.testcases:
testclass = testloader.get_test_class(dbtestcase)
if testclass is not None:
suite.add_test(testclass)
for subsuite in dbsuite.subsuites:
suite.add_suite(get_suite(subsuite, config))
return suite
|
miketheman/opencomparison
|
grid/context_processors.py
|
Python
|
mit
| 430
| 0.002326
|
from itertools import izip, chain, repeat
from grid.models import Grid
def grouper(n, iterable, padvalue=None):
"grouper(3, 'abcdefg', 'x') --> ('a','b','c'), ('d','e','f'), ('g','x','x')"
return izip(*[chain(iterable, repeat(padvalue, n-1))]*n)
def grid_headers(request):
grid_headers = list(Grid.objects.filter(header=True))
    grid_headers = grouper(7, grid_headers)
return {'grid_headers': grid_headers}
|
arthurdarcet/aiohttp
|
aiohttp/multipart.py
|
Python
|
apache-2.0
| 32,819
| 0.000853
|
import base64
import binascii
import json
import re
import uuid
import warnings
import zlib
from collections import deque
from types import TracebackType
from typing import ( # noqa
TYPE_CHECKING,
Any,
Dict,
Iterator,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from urllib.parse import parse_qsl, unquote, urlencode
from multidict import CIMultiDict, CIMultiDictProxy, MultiMapping # noqa
from .hdrs import (
CONTENT_DISPOSITION,
CONTENT_ENCODING,
CONTENT_LENGTH,
CONTENT_TRANSFER_ENCODING,
CONTENT_TYPE,
)
from .helpers import CHAR, TOKEN, parse_mimetype, reify
from .http import HeadersParser
from .payload import (
JsonPayload,
LookupError,
Order,
Payload,
StringPayload,
get_payload,
payload_type,
)
from .streams import StreamReader
__all__ = ('MultipartReader', 'MultipartWriter', 'BodyPartReader',
'BadContentDispositionHeader', 'BadContentDispositionParam',
'parse_content_disposition', 'content_disposition_filename')
if TYPE_CHECKING: # pragma: no cover
from .client_reqrep import ClientResponse # noqa
class BadContentDispositionHeader(RuntimeWarning):
pass
class BadContentDispositionParam(RuntimeWarning):
pass
def parse_content_disposition(header: Optional[str]) -> Tuple[Optional[str],
Dict[str, str]]:
def is_token(string: str) -> bool:
return bool(string) and TOKEN >= set(string)
def is_quoted(string: str) -> bool:
return string[0] == string[-1] == '"'
def is_rfc5987(string: str) -> bool:
return is_token(string) and string.count("'") == 2
def is_extended_param(string: str) -> bool:
return string.endswith('*')
def is_continuous_param(string: str) -> bool:
pos = string.find('*') + 1
if not pos:
return False
substring = string[pos:-1] if string.endswith('*') else string[pos:]
return substring.isdigit()
def unescape(text: str, *,
chars: str=''.join(map(re.escape, CHAR))) -> str:
return re.sub('\\\\([{}])'.format(chars), '\\1', text)
if not header:
return None, {}
disptype, *parts = header.split(';')
if not is_token(disptype):
warnings.warn(BadContentDispositionHeader(header))
return None, {}
params = {} # type: Dict[str, str]
while parts:
item = parts.pop(0)
if '=' not in item:
warnings.warn(BadContentDispositionHeader(header))
return None, {}
key, value = item.split('=', 1)
key = key.lower().strip()
value = value.lstrip()
if key in params:
warnings.warn(BadContentDispositionHeader(header))
return None, {}
if not is_token(key):
warnings.warn(BadContentDispositionParam(item))
continue
elif is_continuous_param(key):
if is_quoted(value):
value = unescape(value[1:-1])
elif not is_token(value):
warnings.warn(BadContentDispositionParam(item))
continue
elif is_extended_param(key):
if is_rfc5987(value):
encoding, _, value = value.split("'", 2)
encoding = encoding or 'utf-8'
else:
warnings.warn(BadContentDispositionParam(item))
continue
try:
value = unquote(value, encoding, 'strict')
except UnicodeDecodeError: # pragma: nocover
warnings.warn(BadContentDispositionParam(item))
continue
else:
failed = True
if is_quoted(value):
failed = False
value = unescape(value[1:-1].lstrip('\\/'))
elif is_token(value):
failed = False
elif parts:
# maybe just ; in filename, in any case this is just
# one case fix, for proper fix we need to redesign parser
_value = '%s;%s' % (value, parts[0])
if is_quoted(_value):
parts.pop(0)
value = unescape(_value[1:-1].lstrip('\\/'))
failed = False
if failed:
warnings.warn(BadContentDispositionHeader(header))
return None, {}
params[key] = value
return disptype.lower(), params
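# e.g. parse_content_disposition('form-data; name="field"; filename="report.txt"')
# returns ('form-data', {'name': 'field', 'filename': 'report.txt'})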
def content_disposition_filename(params: Mapping[str, str],
name: str='filename') -> Optional[str]:
name_suf = '%s*' % name
if not params:
return None
elif name_suf in params:
return params[name_suf]
elif name in params:
return params[name]
else:
parts = []
fnparams = sorted((key, value)
for key, value in params.items()
if key.startswith(name_suf))
for num, (key, value) in enumerate(fnparams):
_, tail = key.split('*', 1)
if tail.endswith('*'):
tail = tail[:-1]
if tail == str(num):
parts.append(value)
else:
break
if not parts:
return None
value = ''.join(parts)
if "'" in value:
encoding, _, value = value.split("'", 2)
encoding = encoding or 'utf-8'
return unquote(value, encoding, 'strict')
return value
class MultipartResponseWrapper:
"""Wrapper around the MultipartBodyReader.
It takes care about
underlying connection and close it when it needs in.
"""
def __init__(self, resp: 'ClientResponse', stream: Any) -> None:
# TODO: add strong annotation to stream
self.resp = resp
self.stream = stream
def __aiter__(self) -> 'MultipartResponseWrapper':
return self
async def __anext__(self) -> Any:
part = await self.next()
if part is None:
raise StopAsyncIteration # NOQA
return part
def at_eof(self) -> bool:
"""Returns True when all response data had been read."""
return self.resp.content.at_eof()
async def next(self) -> Any:
"""Emits next multipart reader object."""
item = await self.stream.next()
if self.stream.at_eof():
await self.release()
return item
async def release(self) -> None:
"""Releases the connection gracefully, reading all the content
to the void."""
await self.resp.release()
class BodyPartReader:
"""Multipart reader for single body part."""
chunk_size = 8192
def __init__(self, boundary: bytes,
headers: Mapping[str, Optional[str]],
content: StreamReader) -> None:
self.headers = headers
self._boundary = boundary
self._content = content
self._at_eof = False
length = self.headers.get(CONTENT_LENGTH, None)
self._length = int(length) if length is not None else None
self._read_bytes = 0
        # TODO: typing.Deque is not supported by Python 3.5
self._unread = deque() # type: Any
self._prev_chunk = None # type: Optional[bytes]
self._content_eof = 0
self._cache = {} # type: Dict[str, Any]
    def __aiter__(self) -> 'BodyPartReader':
return self
async def __anext__(self) -> Any:
part = await self.next()
if part is None:
raise StopAsyncIteration # NOQA
return part
    async def next(self) -> Any:
item = await self.read()
if not item:
return None
return item
async def read(self, *, decode: bool=False) -> Any:
"""Reads body part data.
        decode: Decodes data following the encoding method
        from the Content-Encoding header. If the header is missing,
        the data remains untouched.
"""
if self._at_eof:
return b''
data = bytearray()
while not self._at_eof:
data.extend((await self
|
TomAugspurger/pandas
|
pandas/io/sql.py
|
Python
|
bsd-3-clause
| 62,333
| 0.000433
|
"""
Collection of query wrappers / abstractions to both facilitate data
retrieval and to reduce dependency on DB-specific API.
"""
from contextlib import contextmanager
from datetime import date, datetime, time
from functools import partial
import re
from typing import Iterator, Optional, Union, overload
import warnings
import numpy as np
import pandas._libs.lib as lib
from pandas.core.dtypes.common import is_datetime64tz_dtype, is_dict_like, is_list_like
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import isna
from pandas.core.api import DataFrame, Series
from pandas.core.base import PandasObject
from pandas.core.tools.datetimes import to_datetime
class SQLAlchemyRequired(ImportError):
pass
class DatabaseError(IOError):
pass
# -----------------------------------------------------------------------------
# -- Helper functions
_SQLALCHEMY_INSTALLED = None
def _is_sqlalchemy_connectable(con):
global _SQLALCHEMY_INSTALLED
if _SQLALCHEMY_INSTALLED is None:
try:
import sqlalchemy
_SQLALCHEMY_INSTALLED = True
except ImportError:
_SQLALCHEMY_INSTALLED = False
if _SQLALCHEMY_INSTALLED:
import sqlalchemy # noqa: F811
return isinstance(con, sqlalchemy.engine.Connectable)
else:
return False
def _convert_params(sql, params):
"""Convert SQL and params args to DBAPI2.0 compliant format."""
args = [sql]
if params is not None:
if hasattr(params, "keys"): # test if params is a mapping
args += [params]
else:
args += [list(params)]
return args
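# e.g. _convert_params("SELECT * FROM t WHERE a = ?", (1,)) -> ["SELECT * FROM t WHERE a = ?", [1]]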
def _process_parse_dates_argument(parse_dates):
"""Process parse_dates argument for read_sql functions"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
elif not hasattr(parse_dates, "__iter__"):
parse_dates = [parse_dates]
return parse_dates
def _handle_date_column(col, utc=None, format=None):
if isinstance(format, dict):
return to_datetime(col, errors="ignore", **format)
else:
# Allow passing of formatting string for integers
# GH17855
if format is None and (
issubclass(col.dtype.type, np.floating)
or issubclass(col.dtype.type, np.integer)
):
format = "s"
if format in ["D", "d", "h", "m", "s", "ms", "us", "ns"]:
return to_datetime(col, errors="coerce", unit=format, utc=utc)
elif is_datetime64tz_dtype(col.dtype):
# coerce to UTC timezone
# GH11216
return to_datetime(col, utc=True)
else:
return to_datetime(col, errors="coerce", format=format, utc=utc)
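# e.g. an integer column with format=None is parsed as epoch seconds (unit "s"),
# while a dict format such as {"utc": True} is forwarded straight to to_datetime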
def _parse_date_columns(data_frame, parse_dates):
"""
Force non-datetime columns to be read as such.
Supports both string formatted and integer timestamp columns.
"""
parse_dates = _process_parse_dates_argument(parse_dates)
# we want to coerce datetime64_tz dtypes for now to UTC
# we could in theory do a 'nice' conversion from a FixedOffset tz
# GH11216
for col_name, df_col in data_frame.items():
if is_datetime64tz_dtype(df_col.dtype) or col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
data_frame[col_name] = _handle_date_column(df_col, format=fmt)
return data_frame
def _wrap_result(data, columns, index_col=None, coerce_float=True, parse_dates=None):
"""Wrap result set of query in a DataFrame."""
frame = DataFrame.from_records(data, columns=columns, coerce_float=coerce_float)
frame = _parse_date_columns(frame, parse_dates)
if index_col is not None:
frame.set_index(index_col, inplace=True)
return frame
def execute(sql, con, cur=None, params=None):
"""
Execute the given SQL query using the provided connection object.
Parameters
----------
sql : string
SQL query to be executed.
con : SQLAlchemy connectable(engine/connection) or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by the
library.
If a DBAPI2 object, only sqlite3 is supported.
cur : deprecated, cursor is obtained from connection, default: None
params : list or tuple, optional, default: None
List of parameters to pass to execute method.
Returns
-------
Results Iterable
"""
if cur is None:
pandas_sql = pandasSQL_builder(con)
else:
pandas_sql = pandasSQL_builder(cur, is_cursor=True)
args = _convert_params(sql, params)
return pandas_sql.execute(*args)
# -----------------------------------------------------------------------------
# -- Read and write to DataFrames
@overload
def read_sql_table(
table_name,
con,
schema=None,
index_col=None,
coerce_float=True,
parse_dates=None,
columns=None,
chunksize: None = None,
) -> DataFrame:
...
@overload
def read_sql_table(
table_name,
|
con,
schema=None,
    index_col=None,
coerce_float=True,
parse_dates=None,
columns=None,
chunksize: int = 1,
) -> Iterator[DataFrame]:
...
def read_sql_table(
table_name,
con,
schema=None,
index_col=None,
coerce_float=True,
parse_dates=None,
columns=None,
chunksize: Optional[int] = None,
) -> Union[DataFrame, Iterator[DataFrame]]:
"""
Read SQL database table into a DataFrame.
Given a table name and a SQLAlchemy connectable, returns a DataFrame.
This function does not support DBAPI connections.
Parameters
----------
table_name : str
Name of SQL table in database.
con : SQLAlchemy connectable or str
        A database URI could be provided as a str.
SQLite DBAPI connection mode not supported.
schema : str, default None
Name of SQL schema in database to query (if database flavor
supports this). Uses default schema if None (default).
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point. Can result in loss of precision.
parse_dates : list or dict, default None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default None
List of column names to select from SQL table.
chunksize : int, default None
If specified, returns an iterator where `chunksize` is the number of
rows to include in each chunk.
Returns
-------
DataFrame or Iterator[DataFrame]
        A SQL table is returned as a two-dimensional data structure with labeled
axes.
See Also
--------
read_sql_query : Read SQL query into a DataFrame.
read_sql : Read SQL query or database table into a DataFrame.
Notes
-----
Any datetime values with time zone information will be converted to UTC.
Examples
--------
>>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP
"""
con = _engine_builder(con)
if not _is_sqlalchemy_connectable(con):
raise NotImplementedError(
"read_sql_table only supported for SQLAlchemy connectable."
)
import sqlalchemy
from sqlalchemy.schema import MetaData
meta = MetaData(con, schema=schema)
try:
meta.reflect(only=[table_name], views=True)
except sqlalchemy.exc.InvalidRequestError as err:
|
meyt/mehrcal
|
mehrcal/yapsy/PluginManager.py
|
Python
|
gpl-3.0
| 22,880
| 0.025481
|
#!/usr/bin/python
# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: t; python-indent: 4 -*-
"""
Role
====
The ``PluginManager`` loads plugins that enforce the `Plugin
Description Policy`_, and offers the most simple methods to activate
and deactivate the plugins once they are loaded.
.. note:: It may also classify the plugins in various categories, but
    this behaviour is optional and, if not specified otherwise, all
    plugins are stored in the same default category.
.. note:: It is often more useful to have the plugin manager behave
    like a singleton; this functionality is provided by
``PluginManagerSingleton``
Plugin Description Policy
=========================
When creating a ``PluginManager`` instance, one should provide it with
a list of directories where plugins may be found. In each directory,
a plugin should contain the following elements:
For a *Standard* plugin:
``myplugin.yapsy-plugin``
A *plugin info file* identical to the one previously described.
``myplugin``
    A directory containing an actual Python plugin (i.e. with a
``__init__.py`` file that makes it importable). The upper
namespace of the plugin should present a class inheriting the
``IPlugin`` interface (the same remarks apply here as in the
previous case).
For a *Single file* plugin:
``myplugin.yapsy-plugin``
A *plugin info file* which is identified thanks to its extension,
see the `Plugin Info File Format`_ to see what should be in this
file.
The extension is customisable at the ``PluginManager``'s
    instantiation, since one may usually prefer the extension to bear
the application name.
``myplugin.py``
The source of the plugin. This file should at least define a class
inheriting the ``IPlugin`` interface. This class will be
    instantiated at plugin loading and it will be notified of the
activation/deactivation events.
Plugin Info File Format
-----------------------
The plugin info file is a text file *encoded in ASCII or UTF-8* and
gathering, as its name suggests, some basic information about the
plugin.
- it gives crucial information needed to be able to load the plugin
- it provides some documentation-like information such as the plugin
  author's name and a short description of the plugin functionality.
Here is an example of what such a file should contain::
[Core]
Name = My plugin Name
    Module = the_name_of_the_plugin_to_load_with_no_py_ending
[Documentation]
Description = What my plugin broadly does
Author = My very own name
Version = the_version_number_of_the_plugin
Website = My very own website
.. note:: From such plugin descriptions, the ``PluginManager`` will
    build its own representations of the plugins as instances of
the :doc:`PluginInfo` class.
Changing the default behaviour
==============================
The default behaviour for locating and loading plugins can be changed
using the various options exposed on the interface via getters.
The plugin detection, in particular, can be fully customized by
settting a custom plugin locator. See ``IPluginLocator`` for more
details on this.
Extensibility
=============
Several mechanisms have been put in place to help extend the basic
functionality of the provided classes.
A few *hints* to help you extend those classes:
If the new functionalities do not overlap the ones already
implemented, then they should be implemented as a Decorator class of the
base plugin. This should be done by inheriting the
``PluginManagerDecorator``.
If this previous way is not possible, then the functionalities should
be added as a subclass of ``PluginManager``.
.. note:: The first method is highly preferred since it makes it
    possible to have a more flexible design where one can pick
    several functionalities and literally *add* them to get an
object corresponding to one's precise needs.
API
===
"""
import sys
import os
import imp
from yapsy import log
from yapsy import NormalizePluginNameForModuleName
from yapsy.IPlugin import IPlugin
from yapsy.IPluginLocator import IPluginLocator
# The following two imports are used to implement the default behaviour
from yapsy.PluginFileLocator import PluginFileAnalyzerWithInfoFile
from yapsy.PluginFileLocator import PluginFileLocator
# imported for backward compatibility (this variable was defined here
# before 1.10)
from yapsy import PLUGIN_NAME_FORBIDEN_STRING
# imported for backward compatibility (this PluginInfo was imported
# here before 1.10)
from yapsy.PluginInfo import PluginInfo
class PluginManager(object):
"""
Manage several plugins by ordering them in categories.
The mechanism for searching and loading the plugins is already
implemented in this class so that it can be used directly (hence
it can be considered as a bit more than a mere interface)
The file describing a plugin must be written in the syntax
compatible with Python's ConfigParser module as in the
`Plugin Info File Format`_
"""
def __init__(self,
categories_filter=None,
directories_list=None,
plugin_info_ext=None,
plugin_locator=None):
"""
Initialize the mapping of the categories and set the list of
directories where plugins may be. This can also be set by
        directly calling the methods:
- ``setCategoriesFilter`` for ``categories_filter``
- ``setPluginPlaces`` for ``directories_list``
- ``setPluginInfoExtension`` for ``plugin_info_ext``
        You may look at these functions' documentation for the meaning
        of each corresponding argument.
"""
# as a good practice we don't use mutable objects as default
# values (these objects would become like static variables)
# for function/method arguments, but rather use None.
if categories_filter is None:
categories_filter = {"Default":IPlugin}
self.setCategoriesFilter(categories_filter)
plugin_locator = self._locatorDecide(plugin_info_ext, plugin_locator)
# plugin_locator could be either a dict defining strategies, or directly
# an IPluginLocator object
self.setPluginLocator(plugin_locator, directories_list)
def _locatorDecide(self, plugin_info_ext, plugin_locator):
"""
For backward compatibility, we kept the *plugin_info_ext* argument.
Thus we may use it if provided. Returns the (possibly modified)
*plugin_locator*.
"""
specific_info_ext = plugin_info_ext is not None
specific_locator = plugin_locator is not None
if not specific_info_ext and not specific_locator:
# use the default behavior
res = PluginFileLocator()
elif not specific_info_ext and specific_locator:
# plugin_info_ext not used
res = plugin_locator
elif not specific_locator and specific_info_ext:
# plugin_locator not used, and plugin_info_ext provided
# -> compatibility mode
res = PluginFileLocator()
res.setAnalyzers([PluginFileAnalyzerWithInfoFile("info_ext",plugin_info_ext)])
elif specific_info_ext and specific_locator:
# both provided... issue a warning that tells "plugin_info_ext"
# will be ignored
msg = ("Two incompatible arguments (%s) provided:",
"'plugin_info_ext' and 'plugin_locator'). Ignoring",
"'plugin_info_ext'.")
raise ValueError(" ".join(msg) % self.__class__.__name__)
return res
def setCategoriesFilter(self, categories_filter):
"""
Set the categories of plugins to be looked for as well as the
way to recognise them.
The ``categories_filter`` first defines the various categories
in which the plugins will be stored via its keys and it also
        defines the interface that has to be inherited by the actual
plugin class belonging to each category.
"""
self.categories_interfaces = categories_filter.copy()
# prepare the mapping from categories to plugin lists
self.category_mapping = {}
# also maps the plugin info files (useful to avoid loading
# twice the same plugin...)
self._category_file_mapping = {}
for categ in categories_filter:
self.category_mapping[categ] = []
self._category_file_mapping[categ] = []
def setPluginPlace
|
Orav/kbengine
|
assets/scripts/login/kbemain.py
|
Python
|
lgpl-3.0
| 2,617
| 0.036195
|
# -*- coding: utf-8 -*-
import os
import KBEngine
from KBEDebug import *
"""
The loginapp process mainly handles KBEngine server-side login, account creation and similar work.
The script currently supports several features:
1: account registration checks
2: login checks
3: custom socket callbacks; see the Poller implementation in interface
"""
def onLoginAppReady():
"""
KBEngine method.
    The loginapp is ready.
"""
INFO_MSG('onLoginAppReady: bootstrapGroupIndex=%s, bootstrapGlobalIndex=%s' % \
(os.getenv("KBE_BOOTIDX_GROUP"), os.getenv("KBE_BOOTIDX_GLOBAL")))
#KBEngine.addTimer(0.01, 1.0, onTick)
def onTick(timerID):
"""
"""
INFO_MSG('onTick()')
def onLoginAppShutDown():
"""
KBEngine method.
    Callback invoked before this loginapp is shut down.
"""
INFO_MSG('onLoginAppShutDown()')
def onReuqestLogin(loginName, password, clientType, datas):
"""
KBEngine method.
    Callback invoked when an account requests to log in.
    Login queuing can also be handled here; store the queuing information in datas.
"""
INFO_MSG('onReuqestLogin() loginName=%s, clientType=%s' % (loginName, clientType))
errorno = KBEngine.SERVER_SUCCESS
if len(loginName) > 64:
errorno = KBEngine.SERVER_ERR_NAME;
if len(password) > 64:
errorno = KBEngine.SERVER_ERR_PASSWORD;
return (errorno, loginName, password, clientType, datas)
def onLoginCallbackFromDB(loginName, accountName, errorno, datas):
"""
KBEngine method.
    Callback invoked after the db has validated an account login request.
    loginName: the login name, i.e. the name the client typed in when logging in.
    accountName: the account name, as looked up by dbmgr.
    errorno: KBEngine.SERVER_ERR_*
    This mechanism supports systems with multiple names per account, or login through multiple third-party account systems.
    The account name is returned to the client together with the baseapp address, and the client should use this account name when logging in to the baseapp.
"""
INFO_MSG('onLoginCallbackFromDB() loginName=%s, accountName=%s, errorno=%s' % (loginName, accountName, errorno))
def onRequestCreateAccount(accountName, password, datas):
"""
KBEngine method.
    Callback invoked when account creation is requested.
"""
INFO_MSG('onRequestCreateAccount() %s' % (accountName))
errorno = KBEngine.SERVER_SUCCESS
if len(accountName) > 64:
        errorno = KBEngine.SERVER_ERR_NAME;
if len(password) > 64:
errorno = KBEngine.SERVER_ERR_PASSWORD;
return (errorno, accountName, password, datas)
def onCreateAccountCallbackFromDB(accountName, errorno, datas):
"""
|
KBEngine method.
    Callback invoked after the db has validated an account registration request.
errorno: KBEngine.SERVER_ERR_*
"""
INFO_MSG('onCreateAccountCallbackFromDB() accountName=%s, errorno=%s' % (accountName, errorno))
|
mxrrow/zaicoin
|
src/deps/boost/tools/build/v2/test/TestCmd.py
|
Python
|
mit
| 23,923
| 0.002424
|
"""
TestCmd.py: a testing framework for commands and scripts.
The TestCmd module provides a framework for portable automated testing of
executable commands and scripts (in any language, not just Python), especially
commands and scripts that require file system interaction.
In addition to running tests and evaluating conditions, the TestCmd module
manages and cleans up one or more temporary workspace directories, and provides
methods for creating files and directories in those workspace directories from
in-line data (here-documents), allowing tests to be completely self-contained.
A TestCmd environment object is created via the usual invocation:
test = TestCmd()
The TestCmd module provides pass_test(), fail_test(), and no_result() unbound
methods that report test results for use with the Aegis change management
system. These methods terminate the test immediately, reporting PASSED, FAILED
or NO RESULT respectively and exiting with status 0 (success), 1 or 2
respectively. This allows for a distinction between an actual failed test and a
test that could not be properly evaluated because of an external condition (such
as a full file system or incorrect permissions).
"""
# Copyright 2000 Steven Knight
# This module is free software, and you may redistribute it and/or modify
# it under the same terms as Python itself, so long as this copyright message
# and disclaimer are retained in their original form.
#
# IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
# THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
# AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
# Copyright 2002-2003 Vladimir Prus.
# Copyright 2002-2003 Dave Abrahams.
# Copyright 2006 Rene Rivera.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
from string import join, split
__author__ = "Steven Knight <knight@baldmt.com>"
__revision__ = "TestCmd.py 0.D002 2001/08/31 14:56:12 software"
__version__ = "0.02"
from types import *
import os
import os.path
import popen2
import re
import shutil
import stat
import sys
import tempfile
import traceback
tempfile.template = 'testcmd.'
_Cleanup = []
def _clean():
global _Cleanup
list = _Cleanup[:]
_Cleanup = []
list.reverse()
for test in list:
test.cleanup()
sys.exitfunc = _clean
def caller(tblist, skip):
string = ""
arr = []
for file, line, name, text in tblist:
if file[-10:] == "TestCmd.py":
break
arr = [(file, line, name, text)] + arr
atfrom = "at"
for file, line, name, text in arr[skip:]:
if name == "?":
name = ""
else:
name = " (" + name + ")"
string = string + ("%s line %d of %s%s\n" % (atfrom, line, file, name))
atfrom = "\tfrom"
return string
def fail_test(self=None, condition=True, function=None, skip=0):
"""Cause the test to fail.
By default, the fail_test() method reports that the test FAILED and exits
with a status of 1. If a condition argument is supplied, the test fails only
if the condition is true.
"""
if not condition:
return
if not function is None:
function()
of = ""
desc = ""
sep = " "
if not self is None:
if self.program:
of = " of " + join(self.program, " ")
sep = "\n\t"
if self.description:
desc = " [" + self.description + "]"
sep = "\n\t"
at = caller(traceback.extract_stack(), skip)
sys.stderr.write("FAILED test" + of + desc + sep + at + """
in directory: """ + os.getcwd() )
sys.exit(1)
def no_result(self=None, condition=True, function=None, skip=0):
"""Causes a test to exit with no valid result.
By default, the no_result() method reports NO RESULT for the test and exits
with a status of 2. If a condition argument is supplied, the test fails only
if the condition is true.
"""
if not condition:
return
if not function is None:
function()
of = ""
desc = ""
sep = " "
if not self is None:
if self.program:
of = " of " + self.program
sep = "\n\t"
if self.description:
desc = " [" + self.description + "]"
sep = "\n\t"
at = caller(traceback.extract_stack(), skip)
sys.stderr.write("NO RESULT for test" + of + desc + sep + at)
sys.exit(2)
def pass_test(self=None, condition=True, function=None):
"""Causes a test to pass.
By default, the pass_test() method reports PASSED for the test and exits
with a status of 0. If a condition argument is supplied, the test passes
only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
sys.stderr.write("PASSED\n")
sys.exit(0)
def match_exact(lines=None, matches=None):
"""Returns whether the given lists or strings containing lines separated
using newline characters contain exactly the same data.
"""
if not type(lines) is ListType:
lines = split(lines, "\n")
if not type(matches) is ListType:
matches = split(matches, "\n")
if len(lines) != len(matches):
return
for i in range(len(lines)):
if lines[i] != matches[i]:
return
return 1
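# e.g. match_exact("a\nb", ["a", "b"]) returns 1; any difference returns None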
def match_re(lines=None, res=None):
"""Given lists or strings contain lines separated using newline characters.
This function matches those lines one by one, interpreting the lines in the
res parameter as regular expressions.
"""
if not type(lines) is ListType:
lines = split(lines, "\n")
if not type(res) is ListType:
res = split(res, "\n")
if len(lines) != len(res):
return
for i in range(len(lines)):
if not re.compile("^" + res[i] + "$").search(lines[i]):
return
return 1
class TestCmd:
"""Class TestCmd.
"""
def __init__(self, description=None, program=None, workdir=None,
subdir=None, verbose=False, match=None, inpath=None):
self._cwd = os.getcwd()
self.description_set(description)
if inpath:
self.program = program
else:
self.program_set(program)
self.verbose_set(verbose)
if not match is None:
self.match_func = match
else:
self.match_func = match_re
self._dirlist = []
self._preserve = {'pass_test': 0, 'fail_test': 0, 'no_result': 0}
if os.environ.has_key('PRESERVE') and not os.environ['PRESERVE'] is '':
self._preserve['pass_test'] = os.environ['PRESERVE']
self._preserve['fail_test'] = os.environ['PRESERVE']
self._preserve['no_result'] = os.environ['PRESERVE']
else:
try:
self._preserve['pass_test'] = os.environ['PRESERVE_PASS']
except KeyError:
pass
try:
self._preserve['fail_test'] = os.environ['PRESERVE_FAIL']
except KeyError:
pass
try:
self._preserve['no_result'] = os.environ['PRESERVE_NO_RESULT']
except KeyError:
pass
self._stdout = []
self._stderr = []
self.status = None
self.condition = 'no_result'
self.workdir_set(workdir)
self.subdir(subdir)
def __del__(self):
self.cleanup()
def __repr__(self):
return "%x" % id(self)
def cleanup(self, condition=None):
"""Removes any temporary working directories for the specified TestCmd
environment. If the environment variable PRESERVE was set when the
Test
|
apinsard/khango
|
khango/templatetags/khango.py
|
Python
|
mit
| 169
| 0
|
# -*- coding: utf-8 -*-
from django.template import Library
register = Library()
@register.simple_tag(name='getattr')
def _getattr(*args):
|
return getattr(*args)
|
eustislab/horton
|
scripts/horton-esp-test.py
|
Python
|
gpl-3.0
| 3,917
| 0.004595
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2015 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
import sys, argparse, os, numpy as np
from horton import log, __version__
from horton.scripts.common import parse_h5, store_args, check_output, \
write_script_output
from horton.scripts.espfit import load_charges, load_cost
# All, except underflows, is *not* fine.
np.seterr(divide='raise', over='raise', invalid='raise')
def parse_args():
parser = argparse.ArgumentParser(prog='horton-esp-test.py',
description='Test how well charges reproduce the ESP.')
parser.add_argument('-V', '--version', action='version',
version="%%(prog)s (HORTON version %s)" % __version__)
parser.add_argument('cost',
        help='The location of the cost function in the form '
'"file.h5:group/cost". This argument must be the same as the '
'output argument of the script horton-esp-cost.py.')
parser.add_argument('charges', type=str,
help='The atomic charges to be used in the form '
|
'"file.h5:group/charges". ')
parser.add_argument('output', type=str,
help='The output destination in the form file.h5:group. The colon and '
'the group name are optional. When omitted, the root group of the '
'HDF5 file is used.')
parser.add_argument('--overwrite', default=False, action='store_true',
help='Overwrite existing output in the HDF5 file')
parser.add_argument('--qtot', '-q', default=None, type=float,
help='The total charge of the system. When given, the charges from the '
'HDF5 file are corrected.')
return parser.parse_args()
def main():
args = parse_args()
fn_h5, grp_name = parse_h5(args.output, 'output')
# check if the group is already present (and not empty) in the output file
if check_output(fn_h5, grp_name, args.overwrite):
return
# Load the cost function from the HDF5 file
cost, used_volume = load_cost(args.cost)
# Load the charges from the HDF5 file
charges = load_charges(args.charges)
# Fix total charge if requested
if args.qtot is not None:
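        # spread the total-charge correction uniformly over all atoms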
charges -= (charges.sum() - args.qtot)/len(charges)
# Store parameters in output
results = {}
results['qtot'] = charges.sum()
# Fitness of the charges
results['cost'] = cost.value_charges(charges)
if results['cost'] < 0:
results['rmsd'] = 0.0
else:
results['rmsd'] = (results['cost']/used_volume)**0.5
# Worst case stuff
results['cost_worst'] = cost.worst(0.0)
if results['cost_worst'] < 0:
results['rmsd_worst'] = 0.0
else:
results['rmsd_worst'] = (results['cost_worst']/used_volume)**0.5
# Write some things on screen
if log.do_medium:
log('RMSD charges: %10.5e' % np.sqrt((charges**2).mean()))
log('RMSD ESP: %10.5e' % results['rmsd'])
log('Worst RMSD ESP: %10.5e' % results['rmsd_worst'])
log.hline()
# Store the results in an HDF5 file
write_script_output(fn_h5, grp_name, results, args)
if __name__ == '__main__':
main()
|
Aircollition/Aircollition
|
Python scripts/theta.py
|
Python
|
mit
| 378
| 0.02381
|
import numpy as np
import matplotlib.pyplot as plt
npoint = 100
dec = 3
x = np.linspace(0,1,npoint)
a = dec * np.linspace(0,1,npoint/2)
b = dec * np.linspace(1,0,npoint/2)
delta = np.concatenate((a,b))
delta1 = dec * np.linspace(0,1,npoint)
delta2 = dec * np.ones(npoint)
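# delta is a triangular "tent" profile rising linearly from 0 to dec at the midpoint
# and back to 0; delta1 is a single linear ramp and delta2 is the constant dec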
plt.figure()
plt.plot(x, delta2)
plt.figure()
plt.plot(x, delta1)
plt.figure()
plt.plot(x, delta)
|
harmy/kbengine
|
kbe/res/scripts/common/Lib/test/test_profile.py
|
Python
|
lgpl-3.0
| 7,166
| 0.001535
|
"""Test suite for the profile module."""
import sys
import pstats
import unittest
from difflib import unified_diff
from io import StringIO
from test.support import run_unittest
import profile
from test.profilee import testfunc, timer
class ProfileTest(unittest.TestCase):
profilerclass = profile.Profile
methodnames = ['print_stats', 'print_callers', 'print_callees']
expected_max_output = ':0(max)'
def get_expected_output(self):
return _ProfileOutput
@classmethod
def do_profiling(cls):
results = []
prof = cls.profilerclass(timer, 0.001)
start_timer = timer()
prof.runctx("testfunc()", globals(), locals())
results.append(timer() - start_timer)
for methodname in cls.methodnames:
s = StringIO()
stats = pstats.Stats(prof, stream=s)
stats.strip_dirs().sort_stats("stdname")
getattr(stats, methodname)()
output = s.getvalue().splitlines()
mod_name = testfunc.__module__.rsplit('.', 1)[1]
# Only compare against stats originating from the test file.
# Prevents outside code (e.g., the io module) from causing
# unexpected output.
            output = [line.rstrip() for line in output if mod_name in line]
results.append('\n'.join(output))
return results
def test_cprofile(self):
results = self.do_profiling()
expected = self.get_expected_output()
self.assertEqual(results[0], 1000)
|
for i, method in enumerate(self.methodnames):
if results[i+1] != expected[method]:
print("Stats.%s output for %s doesn't fit expectation!" %
(method, self.profilerclass.__name__))
print('\n'.join(unified_diff(
results[i+1].split('\n'),
expected[method].split('\n'))))
def test_calling_conventions(self):
# Issue #5330: profile and cProfile wouldn't report C functions called
# with keyword arguments. We test all calling conventions.
stmts = [
"max([0])",
"max([0], key=int)",
"max([0], **dict(key=int))",
"max(*([0],))",
"max(*([0],), key=int)",
"max(*([0],), **dict(key=int))",
]
for stmt in stmts:
s = StringIO()
prof = self.profilerclass(timer, 0.001)
prof.runctx(stmt, globals(), locals())
stats = pstats.Stats(prof, stream=s)
stats.print_stats()
res = s.getvalue()
self.assertIn(self.expected_max_output, res,
"Profiling {0!r} didn't report max:\n{1}".format(stmt, res))
def regenerate_expected_output(filename, cls):
filename = filename.rstrip('co')
print('Regenerating %s...' % filename)
results = cls.do_profiling()
newfile = []
with open(filename, 'r') as f:
for line in f:
newfile.append(line)
if line.startswith('#--cut'):
break
with open(filename, 'w') as f:
f.writelines(newfile)
f.write("_ProfileOutput = {}\n")
for i, method in enumerate(cls.methodnames):
f.write('_ProfileOutput[%r] = """\\\n%s"""\n' % (
method, results[i+1]))
f.write('\nif __name__ == "__main__":\n main()\n')
def test_main():
run_unittest(ProfileTest)
def main():
if '-r' not in sys.argv:
test_main()
else:
regenerate_expected_output(__file__, ProfileTest)
# Don't remove this comment. Everything below it is auto-generated.
#--cut--------------------------------------------------------------------------
_ProfileOutput = {}
_ProfileOutput['print_stats'] = """\
28 27.972 0.999 27.972 0.999 profilee.py:110(__getattr__)
1 269.996 269.996 999.769 999.769 profilee.py:25(testfunc)
23/3 149.937 6.519 169.917 56.639 profilee.py:35(factorial)
20 19.980 0.999 19.980 0.999 profilee.py:48(mul)
2 39.986 19.993 599.830 299.915 profilee.py:55(helper)
4 115.984 28.996 119.964 29.991 profilee.py:73(helper1)
2 -0.006 -0.003 139.946 69.973 profilee.py:84(helper2_indirect)
8 311.976 38.997 399.912 49.989 profilee.py:88(helper2)
8 63.976 7.997 79.960 9.995 profilee.py:98(subhelper)"""
_ProfileOutput['print_callers'] = """\
:0(append) <- profilee.py:73(helper1)(4) 119.964
:0(exc_info) <- profilee.py:73(helper1)(4) 119.964
:0(hasattr) <- profilee.py:73(helper1)(4) 119.964
profilee.py:88(helper2)(8) 399.912
profilee.py:110(__getattr__) <- :0(hasattr)(12) 11.964
profilee.py:98(subhelper)(16) 79.960
profilee.py:25(testfunc) <- <string>:1(<module>)(1) 999.767
profilee.py:35(factorial) <- profilee.py:25(testfunc)(1) 999.769
profilee.py:35(factorial)(20) 169.917
profilee.py:84(helper2_indirect)(2) 139.946
profilee.py:48(mul) <- profilee.py:35(factorial)(20) 169.917
profilee.py:55(helper) <- profilee.py:25(testfunc)(2) 999.769
profilee.py:73(helper1) <- profilee.py:55(helper)(4) 599.830
profilee.py:84(helper2_indirect) <- profilee.py:55(helper)(2) 599.830
profilee.py:88(helper2) <- profilee.py:55(helper)(6) 599.830
profilee.py:84(helper2_indirect)(2) 139.946
profilee.py:98(subhelper) <- profilee.py:88(helper2)(8) 399.912"""
_ProfileOutput['print_callees'] = """\
:0(hasattr) -> profilee.py:110(__getattr__)(12) 27.972
<string>:1(<module>) -> profilee.py:25(testfunc)(1) 999.769
profilee.py:110(__getattr__) ->
profilee.py:25(testfunc) -> profilee.py:35(factorial)(1) 169.917
profilee.py:55(helper)(2) 599.830
profilee.py:35(factorial) -> profilee.py:35(factorial)(20) 169.917
profilee.py:48(mul)(20) 19.980
profilee.py:48(mul) ->
profilee.py:55(helper) -> profilee.py:73(helper1)(4) 119.964
profilee.py:84(helper2_indirect)(2) 139.946
profilee.py:88(helper2)(6) 399.912
profilee.py:73(helper1) -> :0(append)(4) -0.004
profilee.py:84(helper2_indirect) -> profilee.py:35(factorial)(2) 169.917
profilee.py:88(helper2)(2) 399.912
profilee.py:88(helper2) -> :0(hasattr)(8) 11.964
profilee.py:98(subhelper)(8) 79.960
profilee.py:98(subhelper) -> profilee.py:110(__getattr__)(16) 27.972"""
if __name__ == "__main__":
main()
|
pychess/pychess
|
lib/pychess/Variants/racingkings.py
|
Python
|
gpl-3.0
| 1,813
| 0.004413
|
""" The Racing Kings Variation"""
from pychess.Utils.const import RACINGKINGSCHESS, VARIANTS_OTHER_NONSTANDARD, \
    A8, B8, C8, D8, E8, F8, G8, H8
from pychess.Utils.Board import Board
RACINGKINGSSTART = "8/8/8/8/8/8/krbnNBRK/qrbnNBRQ w - - 0 1"
RANK8 = (A8, B8, C8, D8, E8, F8, G8, H8)
class RacingKingsBoard(Board):
""" :Description: The Racing Kings variation is where the object of the game
    is to bring your king to the eighth row.
"""
variant = RACINGKINGSCHESS
__desc__ = _(
"In this game, check is entirely forbidden: not only is it forbidden\n" +
"to move ones king into check, but it is also forbidden to check the opponents king.\n" +
"The purpose of the game is to be the first player that moves his king to the eight row.\n" +
"When white moves their king to the eight row, and black moves directly after that also\n" +
"their king to the last row, the game is a draw\n" +
"(this rule is to compensate for the advantage of white that they may move first.)\n" +
"Apart from the above, pieces move and capture precisely as in normal chess."
)
name = _("Racing Kings")
cecp_name = "racingkings"
need_initial_board = True
standard_rules = False
variant_group = VARIANTS_OTHER_NONSTANDARD
def __init__(self, setup=False, lboard=None):
if setup is True:
Board.__init__(self, setup=RACINGKINGSSTART, lboard=lboard)
else:
Board.__init__(self, setup=setup, lboard=lboard)
def testKingInEightRow(board):
""" Test for a winning position """
return board.kings[board.color - 1] in RANK8
def test2KingInEightRow(board):
""" Test for a winning position """
return board.kings[board.color] in RANK8 and board.kings[board.color - 1] in RANK8
|
mikpin/plugin.video.italian-news
|
default.py
|
Python
|
gpl-2.0
| 1,928
| 0.028527
|
import sys, xbmcplugin, xbmcgui,xbmc
_id = "plugin.video.italian-news"
_resdir = "special://home/addons/" + _id + "/resources"
_thisPlugin = int(sys.argv[1])
_icons = _resdir + "/icons/"
sys.path.append( xbmc.translatePath(_resdir + "/lib/"))
import rai
_tg1Icon=xbmc.translatePath(_icons +"Tg1_logo.png")
_tg2Icon=xbmc.translatePath(_icons +"Tg2_logo.png")
_tg3Icon=xbmc.translatePath(_icons +"Tg3_logo.png")
def _addItem(label,uri,icon,isFolder=False):
item = xbmcgui.ListItem(label, iconImage=icon)
xbmcplugin.addDirectoryItem(_thisPlugin,uri,item,isFolder)
def _get_params():
param=[]
paramstring=sys.argv[2]
if len(paramstring)>=2:
params=sys.argv[2]
cleanedparams=params.replace('?','')
if (params[len(params)-1]=='/'):
params=params[0:len(params)-2]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
return param
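# e.g. a plugin URL ending in '?plugin=1' yields param == {'plugin': '1'}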
param = _get_params()
plugins = {
'1':(rai.RaiUno, 'Guarda il TG1',_tg1Icon),
'2':(rai.RaiDue, 'Guarda il TG2',_tg2Icon),
'3':(rai.RaiTre, 'Guarda il TG3',_tg3Icon)
}
if 'plugin' in param:
(engine, title, eicon)=plugins[param['plugin']]
for (name,url,icon) in engine().get():
if icon == '':
icon = eicon
_addItem(name,url,icon)
xbmcplugin.endOfDirectory(_thisPlugin)
else:
for n in sorted(plugins.iterkeys()):
(engine, title, icon)=plugins[n]
print title
_addItem(title,sys.argv[0]+'?plugin='+n,icon,isFolder=True)
xbmcplugin.endOfDirectory(_thisPlugin)
#for (name,url,icon) in tg1:
# _addItem(name,url,icon)
#xbmcplugin.endOfDirectory(_thisPlugin)
|
luotao1/Paddle
|
python/paddle/fluid/ir.py
|
Python
|
apache-2.0
| 22,516
| 0.000622
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import inspect
from os import path
import paddle
from . import core, unique_name
from .framework import _apply_pass, OpProtoHolder
from .proto import framework_pb2
try:
from .proto import pass_desc_pb2
except ModuleNotFoundError:
import sys
sys.path.append(path.join(path.dirname(__file__), 'proto'))
from .proto import pass_desc_pb2
def get_data_vars(program):
data_vars = []
for var_name, var in program.global_block().vars.items():
if var.is_data:
data_vars.append(var_name)
return data_vars
def _update_grad_persistable(main_program):
grad_merge_attr_name = "grad_merge_cond_name"
op_role_var_attr_name = core.op_proto_and_checker_maker.kOpRoleVarAttrName()
has_grad_merge = False
has_persistable_grad_var = False
grad_vars = []
for block_id in range(main_program.num_blocks):
block = main_program.block(block_id)
for op in block.ops:
if grad_merge_attr_name in op.attr_names:
has_grad_merge = True
if op_role_var_attr_name not in op.attr_names:
continue
p_g = op.attr(op_role_var_attr_name)
for g in p_g[1::2]:
g_var = block._find_var_recursive(g)
if g_var is None:
continue
grad_vars.append(g_var)
if g_var.persistable:
has_persistable_grad_var = True
if has_grad_merge and has_persistable_grad_var:
for g_var in grad_vars:
g_var.persistable = True
def apply_build_strategy(main_program, startup_program, build_strategy,
pass_attrs):
def update_attr(attrs, attr_types, name, value, typ=None):
if name not in attrs:
attrs[name] = value
if typ:
attr_types[name] = typ
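    # update_attr only fills in a default when the caller did not already supply
    # the attribute, so values passed in via pass_attrs always take precedence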
def apply_pass(name):
attrs = dict(pass_attrs)
attr_types = {}
update_attr(attrs, attr_types, "nranks", 1, "size_t")
update_attr(attrs, attr_types, "use_cuda", False, "bool")
# TODO(zjl): how to skip fetch variables ?
update_attr(attrs, attr_types, "mem_opt_skip_vars",
get_data_vars(main_program), "list[str]")
_apply_pass(main_program, startup_program, name, attrs, attr_types)
_update_grad_persistable(main_program)
use_cuda = pass_attrs.get("use_cuda", False)
build_strategy = build_strategy._copy()
if build_strategy.sync_batch_norm:
apply_pass("sync_batch_norm_pass")
build_strategy.sync_batch_norm = False
    if build_strategy.fuse_relu_depthwise_conv and use_cuda:
apply_pass("fuse_relu_depthwise_conv_pass")
build_strategy.fuse_relu_depthwise_conv = False
if build_strategy.fuse_bn_act_ops and use_cuda:
apply_pass("fuse_bn_act_pass")
build_strategy.fuse_bn_act_ops = False
if build_strategy.fuse_bn_add_act_ops and use_cuda:
apply_pass("fuse_bn_add_act_pas
|
s")
build_strategy.fuse_bn_add_act_ops = False
if build_strategy.enable_auto_fusion and use_cuda:
apply_pass("fusion_group_pass")
build_strategy.enable_auto_fusion = False
if build_strategy.fuse_elewise_add_act_ops:
apply_pass("fuse_elewise_add_act_pass")
build_strategy.fuse_elewise_add_act_ops = False
if build_strategy.fuse_all_optimizer_ops:
apply_pass([
"coalesce_grad_tensor_pass",
"fuse_adam_op_pass",
"fuse_sgd_op_pass",
"fuse_momentum_op_pass",
])
build_strategy.fuse_all_optimizer_ops = False
# TODO(zjl): support fuse all reduce ops
if build_strategy.cache_runtime_context:
apply_pass("runtime_context_cache_pass")
build_strategy.cache_runtime_context = False
if build_strategy.enable_addto and use_cuda:
# NOTE: how to get fetch vars to skip memory optimization?
apply_pass("inplace_addto_op_pass")
build_strategy.enable_addto = False
if build_strategy.enable_inplace:
apply_pass("buffer_shared_inplace_pass")
build_strategy.enable_inplace = False
build_strategy._clear_finalized()
return build_strategy
class RegisterPassHelper(object):
_register_helpers = list()
def __init__(self, pass_pairs, pass_type=str(), input_specs=dict()):
self._pass_type = pass_type
self._pass_pairs = pass_pairs
self._input_specs = input_specs
RegisterPassHelper._register_helpers.append(self)
def _get_args_from_func(self, func):
args = list()
arg_specs = inspect.getfullargspec(func)
for arg_name in arg_specs.args:
input_spec = self._input_specs.get(arg_name)
if isinstance(input_spec, paddle.static.InputSpec):
args.append(
PassDesc.VarHelper(arg_name, input_spec.shape,
input_spec.dtype))
elif isinstance(input_spec, paddle.ParamAttr):
args.append(paddle.ParamAttr(arg_name))
else:
args.append(PassDesc.VarHelper(arg_name, [-1]))
return args
def _prune_program_desc(self, ops):
for op_desc in ops:
default_attrs = core.get_op_attrs_default_value(
paddle.compat.to_bytes(op_desc.type))
remove_attrs = list()
for attr in op_desc.attrs:
# attr must not in
if attr.name not in [
"op_namescope", "op_callstack", "op_device"
]:
attr_list_fields = attr.ListFields()
# attr format must be: name, type, value
if len(attr_list_fields) == 3:
attr_value = attr.ListFields()[-1][-1]
default_attr_value = default_attrs.get(attr.name)
# value must not default
if default_attr_value != attr_value:
continue
remove_attrs.append(attr)
for attr in remove_attrs:
op_desc.attrs.remove(attr)
def _func_to_program_desc(self, func, ops):
vars = list()
program = paddle.static.Program()
startup_program = paddle.static.Program()
with paddle.static.program_guard(program, startup_program):
args = self._get_args_from_func(func)
vars.extend(args)
outs = func(*args)
if not isinstance(outs, (list, tuple)):
outs = [outs]
for out in outs:
if isinstance(out, PassDesc.OpHelper):
op_outs = out.Outputs()
if len(op_outs) != 1:
raise ValueError(
"Operator '{}' has multiple outputs, please specify one output variable.".
format(out._type))
for op_out in op_outs.values():
vars.extend(op_out)
else:
vars.append(out)
block_desc = program.current_block().desc
for i in range(block_desc.op_size()):
ops.add().ParseFromString(block_desc.op(i).serialize_to_string())
self._prune_program_desc(ops)
return vars, program.current_block().ops
def _convert_vars_to_pass_desc(self, patterns, replaces, desc):
def _add_element_conditions(conditions, elements):
for element in elements:
if element._condition:
|
smartscheduling/scikit-learn-categorical-tree
|
sklearn/cluster/tests/test_dbscan.py
|
Python
|
bsd-3-clause
| 10,974
| 0
|
"""
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
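# Added note (not in the original test file): DBSCAN labels noise points as -1,
# so the cluster counts used throughout these tests simply discard that label,
# e.g.
#
#   labels = np.array([0, 0, 1, -1, 1])
#   n_clusters = len(set(labels)) - (1 if -1 in labels else 0)  # == 2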
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
    assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
        db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal
|
followthesheep/galpy
|
galpy/df_src/streamdf.py
|
Python
|
bsd-3-clause
| 117,381
| 0.021826
|
#The DF of a tidal stream
import copy
import numpy
import multiprocessing
import scipy
from scipy import special, interpolate, integrate
if int(scipy.__version__.split('.')[1]) < 10: #pragma: no cover
from scipy.maxentropy import logsumexp
else:
from scipy.misc import logsumexp
from galpy.orbit import Orbit
from galpy.util import bovy_coords, fast_cholesky_invert, \
bovy_conversion, multi, bovy_plot, stable_cho_factor, bovy_ars
import warnings
from galpy.util import galpyWarning
_INTERPDURINGSETUP= True
_USEINTERP= True
_USESIMPLE= True
_labelDict= {'x': r'$X$',
'y': r'$Y$',
'z': r'$Z$',
'r': r'$R$',
'phi': r'$\phi$',
'vx':r'$V_X$',
             'vy':r'$V_Y$',
'vz':r'$V_Z$',
'vr':r'$V_R$',
'vt':r'$V_T$',
'll':r'$\mathrm{Galactic\ longitude\, (deg)}$',
'bb':r'$\mathrm{Galactic\ latitude\, (deg)}$',
'dist':r'$\mathrm{distance\, (kpc)}$',
'pmll':r'$\mu_l\,(\mathrm{mas\,yr}^{-1})$',
'pmbb':r'$\mu_b\,(\mathrm{mas\,yr}^{-1})$',
'vlos':r'$V_{\mathrm{los}}\,(\mathrm{km\,s}^{-1})$'}
class streamdf(object):
"""The DF of a tidal stream"""
def __init__(self,sigv,progenitor=None,pot=None,aA=None,
tdisrupt=None,sigMeanOffset=6.,leading=True,
sigangle=None,
deltaAngleTrack=None,nTrackChunks=None,nTrackIterations=None,
progIsTrack=False,
Vnorm=220.,Rnorm=8.,
R0=8.,Zsun=0.025,vsun=[-11.1,8.*30.24,7.25],
multi=None,interpTrack=_INTERPDURINGSETUP,
useInterp=_USEINTERP,nosetup=False):
"""
NAME:
__init__
PURPOSE:
        Initialize the DF of a tidal stream
INPUT:
sigv - radial velocity dispersion of the progenitor
tdisrupt= (5 Gyr) time since start of disruption (natural units)
leading= (True) if True, model the leading part of the stream
if False, model the trailing part
progenitor= progenitor orbit as Orbit instance (will be re-integrated, so don't bother integrating the orbit before)
progIsTrack= (False) if True, then the progenitor (x,v) is actually the (x,v) of the stream track at zero angle separation; useful when initializing with an orbit fit; the progenitor's position will be calculated
pot= Potential instance or list thereof
aA= actionAngle instance used to convert (x,v) to actions
sigMeanOffset= (6.) offset between the mean of the frequencies
and the progenitor, in units of the largest
eigenvalue of the frequency covariance matrix
(along the largest eigenvector), should be positive;
to model the trailing part, set leading=False
sigangle= (sigv/122/[1km/s]=1.8sigv in natural coordinates)
estimate of the angle spread of the debris initially
deltaAngleTrack= (None) angle to estimate the stream track over (rad)
nTrackChunks= (floor(deltaAngleTrack/0.15)+1) number of chunks to divide the progenitor track in
nTrackIterations= Number of iterations to perform when establishing the track; each iteration starts from a previous approximation to the track in (x,v) and calculates a new track based on the deviation between the previous track and the desired track in action-angle coordinates; if not set, an appropriate value is determined based on the magnitude of the misalignment between stream and orbit, with larger numbers of iterations for larger misalignments
interpTrack= (might change), interpolate the stream track while
setting up the instance (can be done by hand by
calling self._interpolate_stream_track() and
self._interpolate_stream_track_aA())
useInterp= (might change), use interpolation by default when
calculating approximated frequencies and angles
nosetup= (False) if True, don't setup the stream track and anything
else that is expensive
multi= (None) if set, use multi-processing
Coordinate transformation inputs:
Vnorm= (220) circular velocity to normalize velocities with
Rnorm= (8) Galactocentric radius to normalize positions with
R0= (8) Galactocentric radius of the Sun (kpc)
Zsun= (0.025) Sun's height above the plane (kpc)
vsun= ([-11.1,241.92,7.25]) Sun's motion in cylindrical coordinates (vR positive away from center)
OUTPUT:
object
HISTORY:
2013-09-16 - Started - Bovy (IAS)
2013-11-25 - Started over - Bovy (IAS)
"""
self._sigv= sigv
if tdisrupt is None:
self._tdisrupt= 5./bovy_conversion.time_in_Gyr(Vnorm,Rnorm)
else:
self._tdisrupt= tdisrupt
self._sigMeanOffset= sigMeanOffset
if pot is None: #pragma: no cover
raise IOError("pot= must be set")
self._pot= pot
self._aA= aA
if not self._aA._pot == self._pot:
raise IOError("Potential in aA does not appear to be the same as given potential pot")
if (multi is True): #if set to boolean, enable cpu_count processes
self._multi= multiprocessing.cpu_count()
else:
self._multi= multi
self._progenitor_setup(progenitor,leading)
self._offset_setup(sigangle,leading,deltaAngleTrack)
# if progIsTrack, calculate the progenitor that gives a track that is approximately the given orbit
if progIsTrack:
self._setup_progIsTrack()
self._setup_coord_transform(Rnorm,Vnorm,R0,Zsun,vsun,progenitor)
#Determine the stream track
if not nosetup:
self._determine_nTrackIterations(nTrackIterations)
self._determine_stream_track(nTrackChunks)
self._useInterp= useInterp
if interpTrack or self._useInterp:
self._interpolate_stream_track()
self._interpolate_stream_track_aA()
self.calc_stream_lb()
self._determine_stream_spread()
return None
def _progenitor_setup(self,progenitor,leading):
"""The part of the setup relating to the progenitor's orbit"""
#Progenitor orbit: Calculate actions, frequencies, and angles for the progenitor
self._progenitor= progenitor() #call to get new Orbit
# Make sure we do not use physical coordinates
self._progenitor.turn_physical_off()
acfs= self._aA.actionsFreqsAngles(self._progenitor,maxn=3,
_firstFlip=(not leading))
self._progenitor_jr= acfs[0][0]
self._progenitor_lz= acfs[1][0]
self._progenitor_jz= acfs[2][0]
self._progenitor_Omegar= acfs[3]
self._progenitor_Omegaphi= acfs[4]
self._progenitor_Omegaz= acfs[5]
self._progenitor_Omega= numpy.array([acfs[3],acfs[4],acfs[5]]).reshape(3)
self._progenitor_angler= acfs[6]
self._progenitor_anglephi= acfs[7]
self._progenitor_anglez= acfs[8]
self._progenitor_angle= numpy.array([acfs[6],acfs[7],acfs[8]]).reshape(3)
#Calculate dO/dJ Jacobian at the progenitor
self._dOdJp= calcaAJac(self._progenitor._orb.vxvv,
self._aA,dxv=None,dOdJ=True,
_initacfs=acfs)
self._dOdJpEig= numpy.linalg.eig(self._dOdJp)
return None
def _offset_setup(self,sigangle,leading,deltaAngleTrack):
"""The part of the setup related to calculating the stream/progenitor offset"""
#From the progenitor orbit, determine the sigmas in J and angle
self._sigjr= (self._progenitor.rap()-self._progenitor.rperi())/numpy.pi*self._sigv
self._siglz= self._progenitor.rperi()*self._sigv
|
datapythonista/datapythonista.github.io
|
docs/new-pandas-doc/generated/pandas-DataFrame-plot-bar-2.py
|
Python
|
apache-2.0
| 279
| 0
|
import pandas as pd

speed = [0.1, 17.5, 40, 48, 52, 69, 88]
lifespan = [2, 8, 70, 1.5, 25, 12, 28]
index = ['snail', 'pig', 'elephant',
'rabbit', 'giraffe', 'coyote', 'horse']
df = pd.DataFrame({'speed': speed,
                   'lifespan': lifespan}, index=index)
ax = df.plot.bar(rot=0)
|
prasanna08/oppia
|
core/controllers/reader_test.py
|
Python
|
apache-2.0
| 102,089
| 0.000431
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the page that allows learners to play through an exploration."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals  # pylint: disable=import-only-modules
import logging
from constants import constants
from core.domain import collection_domain
from core.domain import collection_services
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import learner_progress_services
from core.domain import param_domain
from core.domain import question_services
from core.domain import recommendations_services
from core.domain import rights_manager
from core.domain import skill_services
from core.domain import stats_domain
from core.domain import stats_services
from core.domain import story_domain
from core.domain import story_services
from core.domain import topic_services
from core.domain import user_services
from core.platform import models
from core.platform.taskqueue import gae_taskqueue_services as taskqueue_services
from core.tests import test_utils
import feconf
import python_utils
(classifier_models, stats_models) = models.Registry.import_models(
[models.NAMES.classifier, models.NAMES.statistics])
class ReaderPermissionsTest(test_utils.GenericTestBase):
"""Test permissions for readers to view explorations."""
EXP_ID = 'eid'
def setUp(self):
"""Before each individual test, create a dummy exploration."""
super(ReaderPermissionsTest, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.editor = user_services.UserActionsInfo(self.editor_id)
self.exploration = self.save_new_valid_exploration(
self.EXP_ID, self.editor_id, title=self.UNICODE_TEST_STRING,
category=self.UNICODE_TEST_STRING)
def test_unpublished_explorations_are_invisible_to_logged_out_users(self):
self.get_html_response(
'%s/%s' % (feconf.EXPLORATION_URL_PREFIX, self.EXP_ID),
expected_status_int=404)
def test_unpublished_explorations_are_invisible_to_unconnected_users(self):
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.login(self.VIEWER_EMAIL)
self.get_html_response(
'%s/%s' % (feconf.EXPLORATION_URL_PREFIX, self.EXP_ID),
expected_status_int=404)
self.logout()
def test_unpublished_explorations_are_invisible_to_other_editors(self):
other_editor_email = 'another@example.com'
self.signup(other_editor_email, 'othereditorusername')
other_exploration = exp_domain.Exploration.create_default_exploration(
'eid2')
exp_services.save_new_exploration(
other_editor_email, other_exploration)
self.login(other_editor_email)
self.get_html_response(
'%s/%s' % (feconf.EXPLORATION_URL_PREFIX, self.EXP_ID),
expected_status_int=404)
self.logout()
def test_unpublished_explorations_are_visible_to_their_editors(self):
self.login(self.EDITOR_EMAIL)
self.get_html_response(
'%s/%s' % (feconf.EXPLORATION_URL_PREFIX, self.EXP_ID))
self.logout()
def test_unpublished_explorations_are_visible_to_admins(self):
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.set_admins([self.ADMIN_USERNAME])
self.login(self.ADMIN_EMAIL)
self.get_html_response(
'%s/%s' % (feconf.EXPLORATION_URL_PREFIX, self.EXP_ID))
self.logout()
def test_published_explorations_are_visible_to_logged_out_users(self):
rights_manager.publish_exploration(self.editor, self.EXP_ID)
self.get_html_response(
'%s/%s' % (feconf.EXPLORATION_URL_PREFIX, self.EXP_ID))
def test_published_explorations_are_visible_to_logged_in_users(self):
rights_manager.publish_exploration(self.editor, self.EXP_ID)
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.login(self.VIEWER_EMAIL)
self.get_html_response(
'%s/%s' % (feconf.EXPLORATION_URL_PREFIX, self.EXP_ID))
def test_exploration_page_with_iframed_redirects(self):
self.login(self.EDITOR_EMAIL)
exp_version = self.exploration.version
response = self.get_html_response(
'%s/%s' % (feconf.EXPLORATION_URL_PREFIX, self.EXP_ID), params={
'parent': True,
'iframed': True,
'v': exp_version
}, expected_status_int=302
)
self.assertTrue(
response.headers['Location'].endswith(
'/embed/exploration/%s?v=%s' % (self.EXP_ID, exp_version)))
self.logout()
def test_exploration_page_raises_error_with_invalid_exploration_version(
self):
self.login(self.EDITOR_EMAIL)
self.get_html_response(
'%s/%s' % (feconf.EXPLORATION_URL_PREFIX, self.EXP_ID), params={
'v': 10,
'parent': True
}, expected_status_int=404
)
self.logout()
class FeedbackIntegrationTest(test_utils.GenericTestBase):
"""Test the handler for giving feedback."""
def test_give_feedback_handler(self):
"""Test giving feedback handler."""
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
# Load demo exploration.
exp_id = '0'
exp_services.delete_demo('0')
exp_services.load_demo('0')
# Viewer opens exploration.
self.login(self.VIEWER_EMAIL)
exploration_dict = self.get_json(
'%s/%s' % (feconf.EXPLORATION_INIT_URL_PREFIX, exp_id))
state_name_1 = exploration_dict['exploration']['init_state_name']
# Viewer gives 1st feedback.
self.post_json(
'/explorehandler/give_feedback/%s' % exp_id,
{
'state_name': state_name_1,
'feedback': 'This is a feedback message.',
}
)
self.logout()
class ExplorationPretestsUnitTest(test_utils.GenericTestBase):
"""Test the handler for initialising exploration with
state_classifier_mapping.
"""
def setUp(self):
"""Before each individual test, initialize data."""
super(ExplorationPretestsUnitTest, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.skill_id = skill_services.get_new_skill_id()
self.save_new_skill(
self.skill_id, 'user', description='Description')
def test_get_exploration_pretests(self):
super(ExplorationPretestsUnitTest, self).setUp()
story_id = story_services.get_new_story_id()
topic_id = topic_services.get_new_topic_id()
self.save_new_topic(
topic_id, 'user', name='Topic',
description='A new topic', canonical_story_ids=[],
additional_story_ids=[], uncategorized_skill_ids=[],
subtopics=[], next_subtopic_id=0)
self.save_new_story(story_id, 'user', topic_id)
topic_services.add_canonical_story('user', topic_id, story_id)
changelist = [
story_domain.StoryChange({
'cmd': story_domain.CMD_ADD_STORY_NODE,
'node_id': 'node_1',
'title': 'Title 1'
})
]
story_services.update_story('user', story_id, changelist, '
|
google-research/tf-slim
|
tf_slim/data/parallel_reader.py
|
Python
|
apache-2.0
| 11,775
| 0.002803
|
# coding=utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements a parallel data reader with queues and optional shuffling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint:disable=g-direct-tensorflow-import
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import gfile
from tensorflow.python.summary import summary
from tensorflow.python.training import input as tf_input
from tensorflow.python.training import queue_runner
# pylint:enable=g-direct-tensorflow-import
class ParallelReader(io_ops.ReaderBase):
"""Reader class that uses multiple readers in parallel to improve speed.
See ReaderBase for supported methods.
"""
def __init__(self,
reader_class,
common_queue,
num_readers=4,
reader_kwargs=None):
"""ParallelReader creates num_readers instances of the reader_class.
Each instance is created by calling the `reader_class` function passing
the arguments specified in `reader_kwargs` as in:
reader_class(**read_kwargs)
When you read from a ParallelReader, with its `read()` method,
you just dequeue examples from the `common_queue`.
The readers will read different files in parallel, asynchronously enqueueing
their output into `common_queue`. The `common_queue.dtypes` must be
[tf.string, tf.string]
Because each reader can read from a different file, the examples in the
`common_queue` could be from different files. Due to the asynchronous
reading there is no guarantee that all the readers will read the same
number of examples.
If the `common_queue` is a shuffling queue, then the examples are shuffled.
Usage:
common_queue = tf.queue.RandomShuffleQueue(
capacity=256,
min_after_dequeue=128,
dtypes=[tf.string, tf.string])
p_reader = ParallelReader(tf.compat.v1.TFRecordReader, common_queue)
common_queue = tf.queue.FIFOQueue(
capacity=256,
dtypes=[tf.string, tf.string])
p_reader = ParallelReader(readers, common_queue, num_readers=2)
Args:
reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader
common_queue: a Queue to hold (key, value pairs) with `dtypes` equal to
[tf.string, tf.string]. Must be one of the data_flow_ops.Queues
instances, ex. `tf.queue.FIFOQueue()`, `tf.queue.RandomShuffleQueue()`,
...
      num_readers: an integer, number of instances of reader_class to create.
reader_kwargs: an optional dict of kwargs to create the readers.
Raises:
TypeError: if `common_queue.dtypes` is not [tf.string, tf.string].
"""
if len(common_queue.dtypes) != 2:
raise TypeError('common_queue.dtypes must be [tf.string, tf.string]')
for dtype in common_queue.dtypes:
if not dtype.is_compatible_with(tf_dtypes.string):
raise TypeError('common_queue.dtypes must be [tf.string, tf.string]')
reader_kwargs = reader_kwargs or {}
self._readers = [reader_class(**reader_kwargs) for _ in range(num_readers)]
self._common_queue = common_queue
@property
def num_readers(self):
return len(self._readers)
@property
def common_queue(self):
return self._common_queue
def read(self, queue, name=None):
"""Returns the next record (key, value pair) produced by the reader.
The multiple reader instances are all configured to `read()` from the
filenames listed in `queue` and enqueue their output into the `common_queue`
passed to the constructor, and this method returns the next record dequeued
from that `common_queue`.
Readers dequeue a work unit from `queue` if necessary (e.g. when a
reader needs to start reading from a new file since it has finished with
the previous file).
A queue runner for enqueuing in the `common_queue` is automatically added
to the TF QueueRunners collection.
Args:
queue: A Queue or a mutable string Tensor representing a handle to a
Queue, with string work items.
name: A name for the operation (optional).
Returns:
The next record (i.e. (key, value pair)) from the common_queue.
"""
self._configure_readers_by(queue)
return self._common_queue.dequeue(name=name)
def read_up_to(self, queue, num_records, name=None):
"""Returns up to num_records (key, value pairs) produced by a reader.
Will dequeue a work unit from queue if necessary (e.g., when the
Reader needs to start reading from a new file since it has
finished with the previous file).
It may return less than num_records even before the last batch.
**Note** This operation is not supported by all types of `common_queue`s.
If a `common_queue` does not support `dequeue_up_to()`, then a
`tf.errors.UnimplementedError` is raised.
Args:
queue: A Queue or a mutable string Tensor representing a handle to a
Queue, with string work items.
num_records: Number of records to read.
name: A name for the operation (optional).
Returns:
A tuple of Tensors (keys, values) from common_queue.
keys: A 1-D string Tensor.
values: A 1-D string Tensor.
"""
self._configure_readers_by(queue)
return self._common_queue.dequeue_up_to(num_records, name)
def _configure_readers_by(self, queue):
enqueue_ops = []
for reader in self._readers:
enqueue_ops.append(self._common_queue.enqueue(reader.read(queue)))
queue_runner.add_queue_runner(
queue_runner.QueueRunner(self._common_queue, enqueue_ops))
def num_records_produced(self, name=None):
"""Returns the number of records this reader has produced.
Args:
name: A name for the operation (optional).
Returns:
An int64 Tensor.
"""
num_records = [r.num_records_produced() for r in self._readers]
return math_ops.add_n(num_records, name=name)
def num_work_units_completed(self, name=None):
"""Returns the number of work units this reader has finished processing.
Args:
name: A name for the operation (optional).
Returns:
An int64 Tensor.
"""
num_work_units = [r.num_work_units_completed() for r in self._readers]
return math_ops.add_n(num_work_units, name=name)
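# Hedged wiring sketch (added; not part of the original file). `filenames` is
# assumed to be a list of TFRecord paths; the queue setup mirrors the usage
# shown in the ParallelReader docstring above.
#
#   filename_queue = tf_input.string_input_producer(filenames, shuffle=True)
#   common_queue = data_flow_ops.RandomShuffleQueue(
#       capacity=256, min_after_dequeue=128,
#       dtypes=[tf_dtypes.string, tf_dtypes.string])
#   reader = ParallelReader(io_ops.TFRecordReader, common_queue, num_readers=4)
#   key, value = reader.read(filename_queue)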
def parallel_read(data_sources,
reader_class,
num_epochs=None,
num_readers=4,
reader_kwargs=None,
shuffle=True,
dtypes=None,
capacity=256,
min_after_dequeue=128,
seed=None,
scope=None):
"""Reads multiple records in parallel from d
|
ata_sources using n readers.
It uses a ParallelReader to read from multiple files in parallel using
multiple readers created using `reader_class` with `reader_kwargs'.
If shuffle is True the common_queue would be a RandomShuffleQueue otherwise
it would be a FIFOQueue.
Usage:
data_sources = ['path_to/train*']
key, value = parallel_read(data_sources, tf.CSVReader, num_readers=4)
Args:
    data_sources: a list/tuple of files or the location of the data, i.e.
      /path/to/train@128, /path/to/train* or /tmp/.../train*
reader_class: one of the io
|
alexiwamoto/django-rest-api
|
rest_api/admin.py
|
Python
|
mit
| 279
| 0
|
from django.contrib import admin
from .models import Produto, Foto
from rest_framework.authtoken.admin import TokenAdmin
TokenAdmin.raw_id_fields = ('user',)
# Register your models here.
# admin.site.register(Bucketlist)
admin.site.register(Produto)
admin.site.register(Foto)
|
rolandgeider/wger
|
wger/nutrition/tests/test_calories_calculator.py
|
Python
|
agpl-3.0
| 6,686
| 0.00015
|
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# Standard Library
import datetime
import decimal
import json
# Django
from django.contrib.auth.models import User
from django.urls import reverse
# wger
from wger.core.tests.base_testcase import WgerTestCase
from wger.utils.constants import TWOPLACES
from wger.weight.models import WeightEntry
class CaloriesCalculatorTestCase(WgerTestCase):
"""
Tests the calories calculator methods and views
"""
def test_page(self):
"""
Access the page
"""
response = self.client.get(reverse('nutrition:calories:view'))
self.assertEqual(response.status_code, 302)
self.user_login('test')
response = self.client.get(reverse('nutrition:calories:view'))
self.assertEqual(response.status_code, 200)
def test_calculator(self):
"""
Tests the calculator itself
"""
self.user_login('test')
response = self.client.post(reverse('nutrition:calories:activities'),
{'sleep_hours': 7,
'work_hours': 8,
'work_intensity': 1,
'sport_hours': 6,
'sport_intensity': 3,
'freetime_hours': 8,
'freetime_intensity': 1})
self.assertEqual(response.status_code, 200)
result = json.loads(response.content.decode('utf8'))
self.assertEqual(decimal.Decimal(result['factor']),
decimal.Decimal(1.57).quantize(TWOPLACES))
self.assertEqual(decimal.Decimal(result['activities']), decimal.Decimal(2920))
def test_automatic_weight_entry_bmi(self):
"""
Tests that weight entries are automatically created or updated
"""
self.user_login('test')
user = User.objects.get(username=self.current_user)
# Existing weight entry is old, a new one is created
entry1 = WeightEntry.objects.filter(user=user).latest()
response = self.client.post(reverse('nutrition:bmi:calculate'),
{'height': 180,
'weight': 80})
self.assertEqual(response.status_code, 200)
entry2 = WeightEntry.objects.filter(user=user).latest()
self.assertEqual(entry1.weight, 83)
self.assertEqual(entry2.weight, 80)
# Existing weight entry is from today, is updated
entry2.delete()
entry1.date = datetime.date.today()
entry1.save()
response = self.client.post(reverse('nutrition:bmi:calculate'),
{'height': 180,
'weight': 80})
self.assertEqual(response.status_code, 200)
entry2 = WeightEntry.objects.filter(user=user).latest()
self.assertEqual(entry1.pk, entry2.pk)
self.assertEqual(entry2.weight, 80)
# No existing entries
WeightEntry.objects.filter(user=user).delete()
        response = self.client.post(reverse('nutrition:bmi:calculate'),
                                    {'height': 180,
'weight': 80})
        self.assertEqual(response.status_code, 200)
entry = WeightEntry.objects.filter(user=user).latest()
self.assertEqual(entry.weight, 80)
self.assertEqual(entry.date, datetime.date.today())
def test_bmr(self):
"""
Tests the BMR view
"""
self.user_login('test')
response = self.client.post(reverse('nutrition:calories:bmr'),
{'age': 30,
'height': 180,
'gender': 1,
'weight': 80})
self.assertEqual(response.status_code, 200)
result = json.loads(response.content.decode('utf8'))
self.assertEqual(result, {'bmr': '1780'})
def test_automatic_weight_entry_bmr(self):
"""
Tests that weight entries are automatically created or updated
"""
self.user_login('test')
user = User.objects.get(username=self.current_user)
# Existing weight entry is old, a new one is created
entry1 = WeightEntry.objects.filter(user=user).latest()
response = self.client.post(reverse('nutrition:calories:bmr'),
{'age': 30,
'height': 180,
'gender': 1,
'weight': 80})
self.assertEqual(response.status_code, 200)
entry2 = WeightEntry.objects.filter(user=user).latest()
self.assertEqual(entry1.weight, 83)
self.assertEqual(entry2.weight, 80)
# Existing weight entry is from today, is updated
entry2.delete()
entry1.date = datetime.date.today()
entry1.save()
response = self.client.post(reverse('nutrition:calories:bmr'),
{'age': 30,
'height': 180,
'gender': 1,
'weight': 80})
self.assertEqual(response.status_code, 200)
entry2 = WeightEntry.objects.filter(user=user).latest()
self.assertEqual(entry1.pk, entry2.pk)
self.assertEqual(entry2.weight, 80)
# No existing entries
WeightEntry.objects.filter(user=user).delete()
response = self.client.post(reverse('nutrition:calories:bmr'),
{'age': 30,
'height': 180,
'gender': 1,
'weight': 80})
self.assertEqual(response.status_code, 200)
entry = WeightEntry.objects.filter(user=user).latest()
self.assertEqual(entry.weight, 80)
self.assertEqual(entry.date, datetime.date.today())
|
JaneliaSciComp/Neuroptikon
|
Source/documentation/__init__.py
|
Python
|
bsd-3-clause
| 747
| 0.008032
|
""" Documentation package """
import neuroptikon
import wx, wx.html
import os.path, sys, urllib
_sharedFrame = None
def baseURL():
if neuroptikon.runningFromSource:
basePath = os.path.join(neuroptikon.rootDir, 'documentation', 'build', 'Documentation')
else:
basePath = os.path.join(neuroptikon.rootDir, 'documentation')
    return 'file:' + urllib.pathname2url(basePath) + '/'
def showPage(page):
pageURL = baseURL() + page
# Try to open an embedded WebKit-based help browser.
try:
import documentation_frame
documentation_frame.showPage(pageURL)
except:
# Fall back to using the user's default
|
browser outside of Neuroptikon.
wx.LaunchDefaultBrowser(pageURL)
|
bitdeal/bitdeal
|
qa/rpc-tests/test_framework/test_framework.py
|
Python
|
mit
| 7,473
| 0.002141
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Base class for RPC testing
import logging
import optparse
import os
import sys
import shutil
import tempfile
import traceback
from .util import (
initialize_chain,
start_nodes,
connect_nodes_bi,
sync_blocks,
sync_mempools,
stop_nodes,
stop_node,
enable_coverage,
check_json_precision,
initialize_chain_clean,
PortSeed,
)
from .authproxy import JSONRPCException
class BitcoinTestFramework(object):
def __init__(self):
self.num_nodes = 4
self.setup_clean_chain = False
self.nodes = None
def run_test(self):
raise NotImplementedError
def add_options(self, parser):
pass
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
if self.setup_clean_chain:
initialize_chain_clean(self.options.tmpdir, self.num_nodes)
else:
initialize_chain(self.options.tmpdir, self.num_nodes)
def stop_node(self, num_node):
stop_node(self.nodes[num_node], num_node)
def setup_nodes(self):
return start_nodes(self.num_nodes, self.options.tmpdir)
def setup_network(self, split = False):
self.nodes = self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
# If we joined network halves, connect the nodes from the joint
# on outward. This ensures that chains are properly reorganised.
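        # Added illustration (not in the original file): the default four-node
        # topology built below is a simple chain,
        #
        #     node0 -- node1 -- node2 -- node3
        #
        # so skipping the 1-2 connection (split=True) leaves the two halves
        # that split_network()/join_network() operate on.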
if not split:
connect_nodes_bi(self.nodes, 1, 2)
sync_blocks(self.nodes[1:3])
sync_mempools(self.nodes[1:3])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 2, 3)
self.is_network_split = split
self.sync_all()
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
assert not self.is_network_split
stop_nodes(self.nodes)
self.setup_network(True)
def sync_all(self):
if self.is_network_split:
sync_blocks(self.nodes[:2])
sync_blocks(self.nodes[2:])
sync_mempools(self.nodes[:2])
sync_mempools(self.nodes[2:])
else:
sync_blocks(self.nodes)
sync_mempools(self.nodes)
def join_network(self):
"""
Join the (previously split) network halves together.
"""
assert self.is_network_split
stop_nodes(self.nodes)
self.setup_network(False)
def main(self):
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitdealds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't s
|
top bitdealds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../.
|
./../src"),
help="Source directory containing bitdeald/bitdeal-cli (default: %default)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
# backup dir variable for removal at cleanup
self.options.root, self.options.tmpdir = self.options.tmpdir, self.options.tmpdir + '/' + str(self.options.port_seed)
if self.options.trace_rpc:
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
if self.options.coveragedir:
enable_coverage(self.options.coveragedir)
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']
check_json_precision()
success = False
try:
os.makedirs(self.options.tmpdir, exist_ok=False)
self.setup_chain()
self.setup_network()
self.run_test()
success = True
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
except AssertionError as e:
print("Assertion failed: " + str(e))
traceback.print_tb(sys.exc_info()[2])
except KeyError as e:
print("key not found: "+ str(e))
traceback.print_tb(sys.exc_info()[2])
except Exception as e:
print("Unexpected exception caught during testing: " + repr(e))
traceback.print_tb(sys.exc_info()[2])
except KeyboardInterrupt as e:
print("Exiting after " + repr(e))
if not self.options.noshutdown:
print("Stopping nodes")
stop_nodes(self.nodes)
else:
print("Note: bitdealds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success:
print("Cleaning up")
shutil.rmtree(self.options.tmpdir)
if not os.listdir(self.options.root):
os.rmdir(self.options.root)
else:
print("Not cleaning up dir %s" % self.options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
# Test framework for doing p2p comparison testing, which sets up some bitcoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class ComparisonTestFramework(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("LITECOIND", "bitdeald"),
help="bitdeald binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("LITECOIND", "bitdeald"),
help="bitdeald binary to use for reference nodes (if any)")
def setup_network(self):
self.nodes = start_nodes(
self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']] * self.num_nodes,
binary=[self.options.testbinary] +
[self.options.refbinary]*(self.num_nodes-1))
|
sandan/sqlalchemy
|
test/sql/test_compiler.py
|
Python
|
mit
| 148,745
| 0.00002
|
#! coding:utf-8
"""
compiler tests.
These tests are among the very first that were written when SQLAlchemy
began in 2005. As a result the testing style here is very dense;
it's an ongoing job to break these into much smaller tests with correct pep8
styling and coherent test organization.
"""
from sqlalchemy.testing import eq_, is_, assert_raises, \
assert_raises_message, eq_ignore_whitespace
from sqlalchemy import testing
from sqlalchemy.testing import fixtures, AssertsCompiledSQL
from sqlalchemy import Integer, String, MetaData, Table, Column, select, \
func, not_, cast, text, tuple_, exists, update, bindparam,\
literal, and_, null, type_coerce, alias, or_, literal_column,\
Float, TIMESTAMP, Numeric, Date, Text, union, except_,\
intersect, union_all, Boolean, distinct, join, outerjoin, asc, desc,\
over, subquery, case, true, CheckConstraint
import decimal
from sqlalchemy.util import u
from sqlalchemy import exc, sql, util, types, schema
from sqlalchemy.sql import table, column, label
from sqlalchemy.sql.expression import ClauseList, _literal_as_text, HasPrefixes
from sqlalchemy.engine import default
from sqlalchemy.dialects import mysql, mssql, postgresql, oracle, \
sqlite, sybase
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql import compiler
table1 = table('mytable',
column('myid', Integer),
column('name', String),
column('description', String),
)
table2 = table(
'myothertable',
column('otherid', Integer),
column('othername', String),
)
table3 = table(
'thirdtable',
column('userid', Integer),
column('otherstuff', String),
)
metadata = MetaData()
# table with a schema
table4 = Table(
'remotetable', metadata,
Column('rem_id', Integer, primary_key=True),
Column('datatype_id', Integer),
Column('value', String(20)),
schema='remote_owner'
)
# table with a 'multipart' schema
table5 = Table(
'remotetable', metadata,
Column('rem_id', Integer, primary_key=True),
Column('datatype_id', Integer),
Column('value', String(20)),
schema='dbo.remote_owner'
)
users = table('users',
column('user_id'),
column('user_name'),
column('password'),
)
addresses = table('addresses',
column('address_id'),
column('user_id'),
column('street'),
column('city'),
column('state'),
column('zip')
)
keyed = Table('keyed', metadata,
Column('x', Integer, key='colx'),
Column('y', Integer, key='coly'),
Column('z', Integer),
)
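# Added illustration (not part of the original test module): the lightweight
# table()/column() constructs above compile to the strings checked throughout
# these tests, e.g. (whitespace condensed):
#
#   print(select([table1.c.myid]).where(table1.c.name == 'x'))
#   # SELECT mytable.myid FROM mytable WHERE mytable.name = :name_1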
class SelectTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_attribute_sanity(self):
assert hasattr(table1, 'c')
assert hasattr(table1.select(), 'c')
assert not hasattr(table1.c.myid.self_group(), 'columns')
assert hasattr(table1.select().self_group(), 'columns')
assert not hasattr(table1.c.myid, 'columns')
assert not hasattr(table1.c.myid, 'c')
assert not hasattr(table1.select().c.myid, 'c')
assert not hasattr(table1.select().c.myid, 'columns')
assert not hasattr(table1.alias().c.myid, 'columns')
assert not hasattr(table1.alias().c.myid, 'c')
if util.compat.py32:
assert_raises_message(
exc.InvalidRequestError,
'Scalar Select expression has no '
'columns; use this object directly within a '
'column-level expression.',
lambda: hasattr(
select([table1.c.myid]).as_scalar().self_group(),
'columns'))
assert_raises_message(
exc.InvalidRequestError,
'Scalar Select expression has no '
'columns; use this object directly within a '
'column-level expression.',
lambda: hasattr(select([table1.c.myid]).as_scalar(),
'columns'))
else:
assert not hasattr(
select([table1.c.myid]).as_scalar().self_group(),
'columns')
assert not hasattr(select([table1.c.myid]).as_scalar(), 'columns')
def test_prefix_constructor(self):
class Pref(HasPrefixes):
def _generate(self):
return self
assert_raises(exc.ArgumentError,
Pref().prefix_with,
"some prefix", not_a_dialect=True
)
def test_table_select(self):
self.assert_compile(table1.select(),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable")
self.assert_compile(
select(
[
table1,
table2]),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myot
|
hertable.otherid, myothertable.othername FROM mytable, "
"myothertable")
def test_invalid_col_argument(self):
assert_raises(exc.ArgumentError, select, table1)
assert_raises(exc.ArgumentError, select, table1.c.myid)
def test_int_limit_offset_coercion(self):
for given, exp in [
("5", 5),
(5, 5),
(5.2, 5),
(decimal.Decimal("5"), 5),
            (None, None),
        ]:
eq_(select().limit(given)._limit, exp)
eq_(select().offset(given)._offset, exp)
eq_(select(limit=given)._limit, exp)
eq_(select(offset=given)._offset, exp)
assert_raises(ValueError, select().limit, "foo")
assert_raises(ValueError, select().offset, "foo")
assert_raises(ValueError, select, offset="foo")
assert_raises(ValueError, select, limit="foo")
def test_limit_offset_no_int_coercion_one(self):
exp1 = literal_column("Q")
exp2 = literal_column("Y")
self.assert_compile(
select([1]).limit(exp1).offset(exp2),
"SELECT 1 LIMIT Q OFFSET Y"
)
self.assert_compile(
select([1]).limit(bindparam('x')).offset(bindparam('y')),
"SELECT 1 LIMIT :x OFFSET :y"
)
def test_limit_offset_no_int_coercion_two(self):
exp1 = literal_column("Q")
exp2 = literal_column("Y")
sel = select([1]).limit(exp1).offset(exp2)
assert_raises_message(
exc.CompileError,
"This SELECT structure does not use a simple integer "
"value for limit",
getattr, sel, "_limit"
)
assert_raises_message(
exc.CompileError,
"This SELECT structure does not use a simple integer "
"value for offset",
getattr, sel, "_offset"
)
def test_limit_offset_no_int_coercion_three(self):
exp1 = bindparam("Q")
exp2 = bindparam("Y")
sel = select([1]).limit(exp1).offset(exp2)
assert_raises_message(
exc.CompileError,
"This SELECT structure does not use a simple integer "
"value for limit",
getattr, sel, "_limit"
)
assert_raises_message(
exc.CompileError,
"This SELECT structure does not use a simple integer "
"value for offset",
getattr, sel, "_offset"
)
def test_limit_offset(self):
for lim, offset, exp, params in [
(5, 10, "LIMIT :param_1 OFFSET :param_2",
{'param_1': 5, 'param_2': 10}),
(None, 10, "LIMIT -1 OFFSET :param_1", {'param_1': 10}),
(5, None, "LIMIT :param_1", {'param_1': 5}),
(0, 0, "LIMIT :param_1 OFFSET :param_2",
{'param_1': 0, 'param_2': 0}),
]:
self.assert_compile(
select([1]).limit(lim).offset(offset),
"SELECT 1 " + exp,
checkparams=params
)
def test_limit_offset_select_l
|
hcs/mailman
|
src/mailman/commands/cli_control.py
|
Python
|
gpl-3.0
| 7,366
| 0.000543
|
# Copyright (C) 2009-2012 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Module stuff."""
from __future__ import absolute_import, print_function, unicode_literals
__metaclass__ = type
__all__ = [
'Reopen',
'Restart',
'Start',
'Stop',
]
import os
import sys
import errno
import signal
import logging
from zope.interface import implementer
from mailman.bin.master import WatcherState, master_state
from mailman.config import config
from mailman.core.i18n import _
from mailman.interfaces.command import ICLISubCommand
qlog = logging.getLogger('mailman.runner')
@implementer(ICLISubCommand)
class Start:
"""Start the Mailman daemons."""
name = 'start'
def add(self, parser, command_parser):
"""See `ICLISubCommand`."""
self.parser = parser
command_parser.add_argument(
'-f', '--force',
default=False, action='store_true',
help=_("""\
If the master watcher finds an existing master lock, it will
            normally exit with an error message. With this option, the master
will perform an extra level of checking. If a process matching
the host/pid described in the lock file is running, the master
will still exit, requiring you to manually clean up the lock. But
if no matching process is found, the master will remove the
apparently stale lock and make another attempt to claim the master
lock."""))
command_parser.add_argument(
'-u', '--run-as-user',
default=True, action='store_false',
help=_("""\
Normally, this script will refuse to run if the user id and group
id are not set to the 'mailman' user and group (as defined when
you configured Mailman). If run as root, this script will change
to this user and group before the check is made.
This can be inconvenient for testing and debugging purposes, so
the -u flag means that the step that sets and checks the uid/gid
is skipped, and the program is run as the current user and group.
This flag is not recommended for normal production environments.
Note though, that if you run with -u and are not in the mailman
            group, you may have permission problems, such as being unable to
delete a list's archives through the web. Tough luck!"""))
command_parser.add_argument(
'-q', '--quiet',
default=False, action='store_true',
help=_("""\
Don't print status messages. Error messages are still printed to
standard error."""))
def process(self, args):
"""See `ICLISubCommand`."""
# Although there's a potential race condition here, it's a better user
# experience for the parent process to refuse to start twice, rather
# than having it try to start the master, which will error exit.
status, lock = master_state()
if status is WatcherState.conflict:
self.parser.error(_('GNU Mailman is already running'))
elif status in (WatcherState.stale_lock, WatcherState.host_mismatch):
if args.force is None:
self.parser.error(
_('A previous run of GNU Mailman did not exit '
'cleanly. Try using --force.'))
def log(message):
if not args.quiet:
print(message)
        # Daemon process startup according to Stevens, Advanced Programming in
        # the UNIX Environment, Chapter 13.
pid = os.fork()
if pid:
# parent
log(_("Starting Mailman's master runner"))
return
# child: Create a new session and become the session leader, but since
# we won't be opening any terminal devices, don't do the
# ultra-paranoid suggestion of doing a second fork after the setsid()
# call.
os.setsid()
# Instead of cd'ing to root, cd to the Mailman runtime directory.
# However, before we do that, set an environment variable used by the
# subprocesses to calculate their path to the $VAR_DIR.
os.environ['MAILMAN_VAR_DIR'] = config.VAR_DIR
os.chdir(config.VAR_DIR)
# Exec the master watcher.
execl_args = [
sys.executable, sys.executable,
os.path.join(config.BIN_DIR, 'master'),
]
if args.force:
execl_args.append('--force')
if args.config:
execl_args.extend(['-C', args.config])
qlog.debug('starting: %s', execl_args)
os.execl(*execl_args)
# We should never get here.
raise RuntimeError('os.execl() failed')
def kill_watcher(sig):
try:
with open(config.PID_FILE) as fp:
pid = int(fp.read().strip())
except (IOError, ValueError) as error:
# For i18n convenience
print(_('PID unreadable in: $config.PID_FILE'), file=sys.stderr)
print(error, file=sys.stderr)
print(_('Is the master even running?'), file=sys.stderr)
return
try:
os.kill(pid, sig)
except OSError as error:
if error.errno != errno.ESRCH:
raise
print(_('No child with pid: $pid'), file=sys.stderr)
print(error, file=sys.stderr)
print(_('Stale pid file removed.'), file=sys.stderr)
os.unlink(config.PID_FILE)
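# Added note (not part of the original module): the Stop/Reopen/Restart
# subcommands below all reduce to a single kill_watcher() call with the signal
# declared on each class, e.g.
#
#   kill_watcher(signal.SIGTERM)   # 'mailman stop'
#   kill_watcher(signal.SIGHUP)    # 'mailman reopen'
#   kill_watcher(signal.SIGUSR1)   # 'mailman restart'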
@implementer(ICLISubCommand)
class SignalCommand:
"""Common base class for simple, signal sending commands."""
name = None
message = None
signal = None
def add(self, parser, command_parser):
"""See `ICLISubCommand`."""
command_parser.add_argument(
'-q', '--quiet',
default=False, action='store_true',
help=_("""\
Don't print status messages. Error messages are still printed to
standard error."""))
def process(self, args):
"""See `ICLISubCommand`."""
if not args.quiet:
print(_(self.message))
kill_watcher(self.signal)
class Stop(SignalCommand):
"""Stop the Mailman daemons."""
name = 'stop'
message = _("Shutting down Mailman's master runner")
signal = signal.SIGTERM
class Reopen(SignalCommand):
"""Reopen the Mailman daemons."""
name = 'reopen'
message = _('Reopening the Mailman runners')
signal = signal.SIGHUP
@implementer(ICLISubCommand)
class Restart(SignalCommand):
"""Stop the Mailman daemons."""
name = 'restart'
message = _('Restarting the Mailman runners')
signal = signal.SIGUSR1
|
avinassh/simple-web-server
|
HTTPClientRequest.py
|
Python
|
mit
| 1,686
| 0.004745
|
"""
This creates an HTTPClientRequest object. The constructor receives the JSON
file which contains the request. Then it creates the object with the data
specified from the JSON, builds the appropriate GET/POST URL. When Execute
method is called, the request is sent to server
"""
import urllib2
import sys
import os
import re
import json
from urllib import urlencode
class HTTPClientRequest(object):
"""docstring for HTTPClientRequest"""
    def __init__(self, request_specs, HOST_NAME, PORT_NUMBER):
""" Initializes the object """
self.base_url = self._set_base_url(HOST_NAME, PORT_NUMBER)
self.create_request(request_specs)
def create_request(self, request_specs):
""" Creates the request """
try:
payload = self._convert_json_to_dict(request_specs)
payload = self._encode_payload(payload)
self.request_url = urllib2.Request(url=self.base_url, data=payload)
except Exception, e:
raise e
def execute_request(self):
""" Executes the request and returns the response object """
try:
return urllib2.urlopen(self.request_url)
except Exception, e:
raise e
def _convert_json_to_dict(self, request_specs):
""" Converts the JSON into dictionary """
return json.loads(request_specs)
def _encode_payload(self, payload):
""" Returns the string of enocoded url into proper POST url format """
return urlencode(payload)
def _set_base_url(self, HOST_NAME, PORT_NUMBER):
""" Returns the string in the hostname:port format """
return HOST_NAME+':'+PORT_NUMBER
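# Hedged usage sketch (added; not part of the original module). The JSON
# payload and host/port values below are made up for illustration:
#
#   request = HTTPClientRequest('{"name": "foo"}', 'http://localhost', '8080')
#   response = request.execute_request()
#   print response.read()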
|
awolfe76/rural-with-mapbox
|
src/geojson/mk_geojson.py
|
Python
|
cc0-1.0
| 2,410
| 0.039419
|
#write one example for simplify by projecting;
#write a different example for simplifying in DD (0.00001)
#working off of downloaded uauc table
#create working table of state
#simplify polygon (perhaps project to GM, then try multiple distances)
#check vertex count, visual polygon coarseness etc
#use ogr2ogr to export to geojson
#check filesize
#example is vermont
import os
import psycopg2
import time
now = time.localtime(time.time())
print "start time:", time.asctime(now)
#variables
myHost = "localhost"
myPort = "5432"
myUser = "feomike"
db = "feomike"
sch = "analysis"
i_tbl = "tl_2015_us_uac10"
o_tb = "uauc"
r_tbl = "county_ru_dump"
myDist = "10" #3, 5, 10, 13, 15
print "weeding at a distance of " + myDist
#dissolve distance in dd must be smaller than 0.001
#in projected space it is 3 M (using 900913)
def mk_working(myST):
mySQL = "DROP TABLE IF EXISTS " + sch + ".uauc_" + myST + "; COMMIT;"
mySQL = mySQL + "CREATE TABLE " + sch + ".uauc_" + myST + " AS "
mySQL = mySQL + "select gid, uace10, name10, "
mySQL = mySQL + "st_transform(st_simplifypreservetopology(st_transform(geom,900913),"
#mySQL = mySQL + "geom "
mySQL = mySQL + myDist + "),4326) as geom "
mySQL = mySQL + "from " + sch + "." + i_tbl
mySQL = mySQL + " where name10 like '%" + myST + "%'; COMMIT; "
theCur.execute(mySQL)
def del_inRural(myST):
mySQL = "DELETE FROM " + sch + ".uauc_" + myST + " where gid in "
mySQL = mySQL + " ( SELECT " + "uauc_" + myST + ".gid FROM "
mySQL = mySQL + sch + ".uauc_" + myST + ", " + sch + "." + r_tbl + " "
mySQL = mySQL + "where ST_Contains(" + r_tbl + ".geom, ST_Centroid("
mySQL = mySQL + "uauc_" + myST + ".geom)) ); COMMIT;"
theCur.execute(mySQL)
#set up the connection to the database
myConn = "dbname=" + db + " host=" + myHost + " port=" + myPort + " user=" + myUser
conn = psycopg2.connect(myConn)
theCur = conn.cursor()
states = ["AK","AL","AR","AS","AZ","CA","CO","CT"]
states = states + ["DC","DE","FL","GA","GU","HI","IA","ID"]
states = states + ["IL","IN","KS","KY","LA","MA","MD","ME"]
states = states + ["MI","MN","MO","MP","MS","MT","NC","ND"]
states = states + ["NE","NH","NJ","NM","NV","NY","OH","OK"]
states = states + ["OR","PA","PR","RI","SC","SD","TN","TX"]
states = states + ["UT","VA","VI","VT","WA","WI","WV","WY"]
#states = ["MN"]
for st in states:
print " doing " + st
mk_working(st)
del_inRural(st)
theCur.close()
del theCur
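# The header comments mention exporting to GeoJSON with ogr2ogr, but the script
# stops after building the per-state tables. A hedged sketch of that step (the
# output file name and the Vermont table are assumptions) might look like:
#
#   ogr2ogr -f GeoJSON vt_uauc.geojson \
#       "PG:host=localhost port=5432 dbname=feomike user=feomike" \
#       -sql "SELECT gid, uace10, name10, geom FROM analysis.uauc_VT"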
|
wangqingbaidu/aliMusic
|
models/new_songs_incr.py
|
Python
|
gpl-3.0
| 38,314
| 0.012382
|
# -*- coding: UTF-8 -*-
'''
Authorized by vlon Jang
Created on Jul 3, 2016
Email:zhangzhiwei@ict.ac.cn
From Institute of Computing Technology
All Rights Reserved.
'''
import pandas as pd
import numpy as np
import pymysql
import matplotlib
# from sklearn.preprocessing.data import MinMaxScaler
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
import random
random.seed(219)
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import datetime
data2use = {True:'p2_clean_user_actions_with_artists', False:'p2_user_actions_with_artists'}
maxt = {True:None, False:'smaller than maxt'}
lent = {True:None, False:'smaller than lent'}
train_ds = ['20150630', '20150530', '20150430']
class NewSongDecay:
"""
Parameters
-------------
@toDate: Must be set, eg. 20150630, it will predict 20150702-20150830
@bySelfModel: True or False, if False will ignore addInfinity and infinityNumberRatio
@addInfinity: True or False, use infinite or not
    @infinityNumberRatio: float, ratio of infinity points used relative to the total length
@updateFactor: True or False, update decay factor or not
@useFactor: True or False, use decay factor or not
@ifShowEachArtistScore: True or False, show score of each artist predicted
    @max_value_threshold: If the max number of a train set is smaller than this, it will be ignored
    @train_length_threshold: If the length of a train set is smaller than this, it will be ignored
    @r2_threashold: If the prediction score of a test set is smaller than this, it will be treated as unpredictable
    @predict_length_threshold: If the length of a test set is smaller than this, it will be treated as unpredictable
"""
def __init__(self,
toDate = None,
bySelfModel = False,
addInfinity = False,
infinityNumberRatio = .5,
new_song_day_use = 61,
me_day_use = 14,
max_value_threshold = 500,
train_length_threshold = 30,
r2_threashold = .5,
predict_length_threshold = 5,
use_clean = True,
local_data2use = 'artist_play',
updateFactor = False,
useFactor = True,
first_use = False,
ifTestOnline = False,
ifDebug = True,
ifPlot = True,
ifPlotTrain = False,
ifPlotTest = False,
ifShowEachArtistScore = False,
host='10.25.0.118', port=3306,user='root', passwd='111111', db='alimusic'):
assert toDate
self.ifDebug = ifDebug
self.ifTestOnline = ifTestOnline
self.infinityNumberRatio = infinityNumberRatio
self.ifShowEachArtistScore = ifShowEachArtistScore
self.ifPlot = ifPlot
self.ifPlotTrain = ifPlotTrain,
self.ifPlotTest = ifPlotTest
self.toDate = toDate
self.updateFactor = updateFactor
self.useFactor = useFactor
self.bySelfModel = bySelfModel
self.addInfinity = addInfinity
self.unpredictable = []
self.max_value_threshold = max_value_threshold
self.train_length_threshold = train_length_threshold
self.r2_threashold = r2_threashold
self.predict_length_threshold = predict_length_threshold
self.new_song_from_date = (datetime.datetime.strptime(self.toDate, '%Y%m%d') +
datetime.timedelta(days = -new_song_day_use + 1)).strftime('%Y%m%d')
self.me_day_use = me_day_use
self.me_from_date = (datetime.datetime.strptime(self.toDate, '%Y%m%d') +
datetime.timedelta(days = -me_day_use + 1)).strftime('%Y%m%d')
tfgapday = datetime.timedelta(days=2)
ttgapday = datetime.timedelta(days= 61)
self.tfromDate = (datetime.datetime.strptime(self.toDate, "%Y%m%d").date() +
tfgapday).strftime('%Y%m%d')
self.ttoDate = (datetime.datetime.strptime(self.toDate, "%Y%m%d").date() +
ttgapday).strftime('%Y%m%d')
self.use_clean = use_clean
self.local_data2use = local_data2use
self.first_use = first_use
self.decay_artist = []
self.X_additional_features = None
self.mysql_cn= pymysql.connect(host=host, port=port,user=user, passwd=passwd, db=db)
def __del__(self):
if self.mysql_cn:
self.mysql_cn.close()
def __transform_X(self, x):
return 1.0/(1+np.log2(x)/np.log2(100))
# return np.power(x, 0.1)
# return 1.0 / x
def __transform_y(self, y, max_y):
return y / max_y
def __inverse_transform_y(self, y, max_y):
return y * max_y
def getOnlineSQL(self):
return self.genDependence() + self.genPeriodME() + self.genNewSongOutBaseline()
def getResultSQL(self):
return self.genResultTable()
def genDependence(self):
sqlTemplate = '''
drop table if exists zzw_all_keys;
create table zzw_all_keys as
select c.artist_id,to_char(dateadd(to_date(c.ds,"yyyymmdd"),61,"dd"),"yyyymmdd") as ds from (
select artist_id,ds from {data2use}
where ds>="20150301" and ds<"20150831" group by artist_id,ds
)c;
drop table if exists zzw_new_songs;
create table zzw_new_songs as
select a.artist_id, a.ds, a.action_type, b.publish_time from
{data2use} a
join (
select song_id, publish_time from songs
where publish_time >= '20150301' and publish_time <= '20150830'
)b
on a.song_id = b.song_id;
drop table if exists new_songs_plays;
create table new_songs_plays as
select concat(artist_id, publish_time) as album, ds, count(1) as plays from zzw_new_songs
where action_type = '1'
group by artist_id, publish_time, ds;
'''
if self.first_use:
return sqlTemplate.format(data2use = data2use[self.use_clean])
else:
return ''
def genPeriodME(self):
if self.ifDebug:
print self.new_song_from_date, self.me_from_date, self.toDate
sqlTemplate = '''
drop table if exists period_new_songs_out_me;
create table period_new_songs_out_me as
select artist_id,sum(1/plays)/sum(1/plays/plays) as plays from(
select song_out.artist_id,song_out.ds,count(1) as plays
from (
select fi.* from(
select cua.*, s.album from
p2_user_song_with_user_clean_2 cua
left outer join(
select song_id, concat(artist_id, publish_time) as album from songs
)s on s.song_id = cua.song_id
)fi
where fi.album not in(
select album from predictable
)
)song_out
where song_out.action_type=1
and song_out.ds >= "{me_from_date}"
and song_out.ds <= "{toDate}"
group by song_out.artist_id,song_out.ds
)b
group by artist_id;
'''
        return sqlTemplate.format(new_song_from_date = self.new_song_from_date,
toDate = self.toDate,
me_from_date = self.me_from_date,
data2use = data2use[self.use_clean])
def genNewSongOutBaseline(self):
sqlTemplate = '''
drop table if exists new_song_out_baseline;
create table new_song_out_
|
oVirt/imgbased
|
src/imgbased/plugins/openscap.py
|
Python
|
gpl-2.0
| 1,850
| 0
|
import logging
from ..openscap import OSCAPScanner
log = logging.getLogger(__package__)
def init(app):
app.hooks.connect("pre-arg-parse", add_argparse)
app.hooks.connect("post-arg-parse", post_argparse)
def add_argparse(app, parser, subparsers):
s = subparsers.add_parser("openscap", help="Security management")
s.add_argument("--all", action="store_true",
help="List all available profiles")
s.add_argument("--list", action="store_true",
help="List registered profile")
s.add_argument("--configure", action="store_true",
help="Auto configure SCAP profile and datastream")
s.add_argument("--register", nargs=2, metavar=("DATASTREAM", "PROFILE"),
help="Register data for scanning")
s.add_argument("--unregister", metavar="PROFILE",
help="Register data for scanning")
s.add_argument("--scan", metavar="PATH",
help="Use registered profile to perform a scan")
s.add_argument("--remediate", metavar="PATH",
help="Use registered profile to remediate system")
def post_argparse(app, args):
if args.command == "openscap":
os = OSCAPScanner()
if args.list:
print("Regis
|
tered profile: %s" % os.profile)
elif args.all:
for id
|
_, desc in os.profiles().items():
print("Id: %s\n %s\n" % (id_, desc))
elif args.configure:
os.configure()
elif args.register:
datastream, profile = args.register
os.register(datastream, profile)
elif args.unregister:
os.unregister(args.unregister)
elif args.scan:
os.scan(remediate=False, path=args.scan)
elif args.remediate:
os.scan(remediate=True, path=args.remediate)
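# Minimal programmatic sketch using only the OSCAPScanner calls exercised above
# (the datastream and profile names are placeholders, not real files):
def _example_scan():
    scanner = OSCAPScanner()
    scanner.register("example-datastream.xml", "example-profile")
    scanner.scan(remediate=False, path="/")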
|
jsemple19/BSA_simulation
|
recombSim2.py
|
Python
|
gpl-2.0
| 10,746
| 0.020101
|
# last modified 20151215 changed sequencing selection to use sampling with
# replacement for all populations sizes
import numpy as np
import random as rnd
from itertools import groupby
import os
import random
#mySeed=2015
#random.seed(mySeed)
def encode(input_nparray):
'''
converts a numpy boolean array to numpy array with run length encoding
array( [1,1,1,0,0,0], dtype=bool ) -> array( [[3,1],[3,0]], dtype=int )
Note: using a list of lists might be faster but takes a bit more space
not sure what the tradeoff is.
'''
return np.array([[len(list(rep)), int(val)] for val,rep in
groupby(input_nparray)],dtype=int)
def decode(rle_lst):
'''
expands a numpy two dimensional array of run length encoding into a one
dimensional numpy boolean array
array( [[3,1],[3,0]], dtype=int ) -> array( [1,1,1,0,0,0], dtype=bool )
'''
return np.array(np.hstack([val] * rep for rep,val in rle_lst),dtype=bool)
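# Illustrative round trip of the run-length helpers above (the example array is
# made up, and the __main__ guard keeps module imports unchanged):
if __name__ == '__main__':
    _example = np.array([1, 1, 1, 0, 0, 0], dtype=bool)
    print(encode(_example))   # expected: [[3 1]
                              #            [3 0]]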
class celegRR:
'''
object with C. elegans specific recombination rate data from Rockman et al.
self.fractions takes the number of cM and transforms them to fraction of the
total chromosome length
    self.rates takes the recombination rates in each of these regions, and adds
    a small adjustment so that the tips of the chromosome have at least some low
    recombination rate (>100x less than the centre of the chromosome)
'''
def __init__(self,numLoci=100):
self.numLoci=numLoci
self.fractions=np.array([[3.5, 22.1, 47.7, 25.4, 1.3],
[2.0, 29.9, 46.7, 16.9, 4.5],
[3.6, 23.4, 48.0, 20.9, 4.1],
[4.1, 18.2, 51.9, 21.4, 4.4],
[3.1, 25.1, 50.9, 18.1, 2.8],
[3.2, 31.4, 35.8, 22.2, 7.4]],dtype=float)/100
self.chrNames=["chrI","chrII","chrIII","chrIV","chrV","chrX"]
self.adjust=np.vstack((np.array([0.01]*6),np.array([-0.01]*6),
np.array([0]*6),np.array([-0.01]*6),
np.array([0.01]*6))).transpose()
self.rates=np.array([[0, 3.43, 1.34, 6.78, 0],
[0, 4.92, 1.33, 8.47, 0],
[0, 7.83, 1.17, 7.24, 0],
[0, 7.65, 1.05, 3.64, 0],
[0, 3.22, 1.32, 5.47, 0],
[0, 3.81, 1.70, 5.14, 0]],dtype=float)+self.adjust
def perLocusRecRates(self,chr="chrIII"):
'''
perLocusRecRates(str) -> nd.array
Returns a numpy array of length numLoci with specific recombination
probability at each locus, such that they sum to a total probability is
1 over the whole chromosome (array). If none of the C. elegans
chromosomes are specified then an array of uniform probabilities
will be returned.
'''
if (chr not in self.chrNames):
probRec=np.ones(self.numLoci,dtype=float)/self.numLoci
return probRec
i=self.chrNames.index(chr)
probRec=np.hstack([[self.rates[i,j]]*np.floor(self.fractions[i,j]*self.numLoci) for j in range(len(self.rates[i,]))])
if (len(probRec)<self.numLoci):
probRec=np.hstack((probRec,[self.rates[i,4]]*(self.numLoci-len(probRec))))
elif (len(probRec)>self.numLoci):
probRec=probRec[0:self.numLoci]
probRec=probRec/sum(probRec)
return probRec
class haploidGenome:
'''
haploid genome with a given number of loci(numLoci), mutation rate (mutRate),
and recombination probability at each locus (recProb).
Optional run length encoding for compression with useRLE.
Currently only supports a single chromosome.
'''
def __init__(self, numLoci, mutRate, recProb, useRLE=False):
self.numLoci=numLoci
self.mutRate=mutRate
self.recProb=recProb
self.useRLE=useRLE
self.loci=[]
def getGenome(self):
if (self.useRLE==True):
return decode(self.loci)
return self.loci
def setGenome(self, genome):
if (self.useRLE==True):
self.loci=encode(genome)
else:
self.loci=genome.astype(bool)
def mutate(self):
mut=np.random.binomial(1,self.mutRate,self.numLoci)
self.setGenome(abs(self.getGenome()-mut))
def qtlGenotypeVal(self,qtlLoci,effectSize):
return sum(self.getGenome()[qtlLoci]*effectSize)
class founderGenome(haploidGenome):
'''
    a haploid genome of a parental strain.
    contains method to create a founder genome of a single genotype (0 or 1)
    of a specified length
    '''
    def __init__(self,haploidGenome):
        self.numLoci=haploidGenome.numLoci
self.mutRate=haploidGenome.mutRate
self.recProb=haploidGenome.recProb
self.useRLE=haploidGenome.useRLE
def createFounder(self, genotype):
if (genotype==1):
self.setGenome(np.ones(self.numLoci,dtype=bool))
else:
self.setGenome(np.zeros(self.numLoci,dtype=bool))
class daughterGenome(haploidGenome):
'''
    a haploid genome that inherits its attributes from parents.
contains method for recombining two parental genomes to generate a new
daughter genome
'''
def __init__(self,haploidGenome):
self.numLoci=haploidGenome.numLoci
self.mutRate=haploidGenome.mutRate
self.recProb=haploidGenome.recProb
self.useRLE=haploidGenome.useRLE
def recombineOnce(self,genomeA,genomeB,mutate=True):
if (mutate==True):
genomeA.mutate()
genomeB.mutate()
breakpoint=np.random.choice(range(0,genomeA.numLoci),p=genomeA.recProb)
if (rnd.randint(0,1)==0):
gamete=np.hstack((genomeA.getGenome()[0:breakpoint],
genomeB.getGenome()[breakpoint:]))
else:
gamete=np.hstack((genomeB.getGenome()[0:breakpoint],
genomeA.getGenome()[breakpoint:]))
self.setGenome(gamete)
class Population:
'''
a collection of genomes
'''
def __init__(self, N, currentGen=0):
self.N=N
self.currentGen=currentGen
self.genomes=[]
def createF2(self,founder1,founder2,mutRate,recProb):
F2=[]
for g in range(0,self.N*2):
gamete=daughterGenome(founder1)
gamete.recombineOnce(founder1,founder2)
F2.append(gamete)
self.currentGen=2
self.genomes=F2
def createRIpop(self,toGeneration):
if self.currentGen>=toGeneration:
print("Population has already undergone %d generations" %
self.currentGen)
for gen in range(self.currentGen+1,toGeneration+1):
tempPop=[]
gameteIndex1=0
gameteIndex2=1
for indiv in range(self.N):
genomeA=self.genomes[gameteIndex1]
genomeB=self.genomes[gameteIndex2]
gamete1=daughterGenome(genomeA)
gamete2=daughterGenome(genomeA)
gamete1.recombineOnce(genomeA,genomeB)
gamete2.recombineOnce(genomeA,genomeB)
tempPop.append(gamete1)
tempPop.append(gamete2)
gameteIndex1+=2
gameteIndex2+=2
self.genomes=tempPop
rnd.shuffle(self.genomes)
self.currentGen=toGeneration
def pheSelect(self, qtlLoci, effectSize, selectionDepth):
'''
selects selectionDepth% individuals from population according to
genotype value at qtl loci ("selected population")
'''
selected=selectedPopulation(self)
selected.N=int(self.N*selectionDepth/100)
genotypeVals=np.array([g.qtlGenotypeVal(qtlLoci,effectSize)
for g in self.genomes],dtype=int).reshape(self.N,2).transpose()
diploidVals=genotypeVals[0]+genotypeVals[1]
varEnv=np.var(diploidVals)
pheVals=diploidVals+np.random.normal(0,varEnv,self.N)
pheOrder=pheVals.argsort()[::-1][:selected.N]
indexRow=list(pheOrder*2)+list(pheOrder*2+1)
selected.genomes=[self.genomes[i] for i in indexRow]
return selected
|
googleads/google-ads-python
|
google/ads/googleads/v10/services/types/campaign_service.py
|
Python
|
apache-2.0
| 6,174
| 0.00081
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v10.enums.types import (
response_content_type as gage_response_content_type,
)
from google.ads.googleads.v10.resources.types import campaign as gagr_campaign
from google.protobuf import field_mask_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v10.services",
marshal="google.ads.googleads.v10",
manifest={
"MutateCampaignsRequest",
"CampaignOperation",
"MutateCampaignsResponse",
"MutateCampaignResult",
},
)
class MutateCampaignsRequest(proto.Message):
r"""Request message for
[CampaignService.MutateCampaigns][google.ads.googleads.v10.services.CampaignService.MutateCampaigns].
Attributes:
customer_id (str):
Required. The ID of the customer whose
campaigns are being modified.
operations (Sequence[google.ads.googleads.v10.services.types.CampaignOperation]):
Required. The list of operations to perform
on individual campaigns.
partial_failure (bool):
If true, successful operations will be
carried out and invalid operations will return
errors. If false, all operations will be carried
out in one transaction if and only if they are
all valid. Default is false.
validate_only (bool):
If true, the request is validated but not
executed. Only errors are returned, not results.
response_content_type (google.ads.googleads.v10.enums.types.ResponseContentTypeEnum.ResponseContentType):
The response content type setting. Determines
whether the mutable resource or just the
resource name should be returned post mutation.
"""
customer_id = proto.Field(proto.STRING, number=1,)
operations = proto.RepeatedField(
        proto.MESSAGE, number=2, message="CampaignOperation",
)
partial_failure = proto.Field(proto.BOOL, number=3,)
validate_only = proto.Field(proto.BOOL, number=4,)
response_content_type = proto.Field(
proto.ENUM,
number=5,
enum=gage_response_content_type.ResponseContentTypeEnum.ResponseContentType,
)
class CampaignOperation(proto.Message):
r"""A single operation (create, update, remove) on a campaign.
    This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
update_mask (google.protobuf.field_mask_pb2.FieldMask):
FieldMask that determines which resource
fields are modified in an update.
create (google.ads.googleads.v10.resources.types.Campaign):
Create operation: No resource name is
expected for the new campaign.
This field is a member of `oneof`_ ``operation``.
update (google.ads.googleads.v10.resources.types.Campaign):
Update operation: The campaign is expected to
have a valid resource name.
This field is a member of `oneof`_ ``operation``.
remove (str):
Remove operation: A resource name for the removed campaign
is expected, in this format:
``customers/{customer_id}/campaigns/{campaign_id}``
This field is a member of `oneof`_ ``operation``.
"""
update_mask = proto.Field(
proto.MESSAGE, number=4, message=field_mask_pb2.FieldMask,
)
create = proto.Field(
proto.MESSAGE,
number=1,
oneof="operation",
message=gagr_campaign.Campaign,
)
update = proto.Field(
proto.MESSAGE,
number=2,
oneof="operation",
message=gagr_campaign.Campaign,
)
remove = proto.Field(proto.STRING, number=3, oneof="operation",)
class MutateCampaignsResponse(proto.Message):
r"""Response message for campaign mutate.
Attributes:
partial_failure_error (google.rpc.status_pb2.Status):
Errors that pertain to operation failures in the partial
failure mode. Returned only when partial_failure = true and
all errors occur inside the operations. If any errors occur
outside the operations (e.g. auth errors), we return an RPC
level error.
results (Sequence[google.ads.googleads.v10.services.types.MutateCampaignResult]):
All results for the mutate.
"""
partial_failure_error = proto.Field(
proto.MESSAGE, number=3, message=status_pb2.Status,
)
results = proto.RepeatedField(
proto.MESSAGE, number=2, message="MutateCampaignResult",
)
class MutateCampaignResult(proto.Message):
r"""The result for the campaign mutate.
Attributes:
resource_name (str):
Returned for successful operations.
campaign (google.ads.googleads.v10.resources.types.Campaign):
The mutated campaign with only mutable fields after mutate.
The field will only be returned when response_content_type
is set to "MUTABLE_RESOURCE".
"""
resource_name = proto.Field(proto.STRING, number=1,)
campaign = proto.Field(
proto.MESSAGE, number=2, message=gagr_campaign.Campaign,
)
__all__ = tuple(sorted(__protobuf__.manifest))
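# Hypothetical construction sketch for the messages defined above (the customer
# and campaign IDs are placeholders; the resource-name format follows the
# CampaignOperation.remove documentation in this module):
def _example_request():
    operation = CampaignOperation(remove="customers/1234567890/campaigns/987654")
    return MutateCampaignsRequest(
        customer_id="1234567890",
        operations=[operation],
        validate_only=True,
    )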
|
barliant/fnc-id
|
django_project/old/hoaxdetector/hoaxdetector/wsgi.py
|
Python
|
apache-2.0
| 401
| 0
|
"""
WSGI config for hoaxdetector project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hoaxdetector.settings")
application = get_wsgi_application()
|
nMustaki/python-slugify
|
setup.py
|
Python
|
bsd-3-clause
| 2,653
| 0.000754
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
import re
import os
import sys
import codecs
name = 'python-slugify'
package = 'slugify'
description = 'A Python Slugify application that handles Unicode'
url = 'https://github.com/un33k/python-slugify'
author = 'Val Neekman'
author_email = 'info@neekware.com'
license = 'BSD'
install_requires = ['Unidecode>=0.04.16']
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
]
def get_version(package):
"""
    Return package version as listed in `__version__` in `__init__.py`.
"""
init_py = codecs.open(os.path.join(package, '__init__.py'), encoding='utf-8').read()
return re.search("^__version__ = ['\"]([^'\"]+)['\"]", init_py, re.MULTILINE).group(1)
def get_packages(package):
"""
|
Return root package and all
|
sub-packages.
"""
return [dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, '__init__.py'))]
def get_package_data(package):
"""
Return all files under the root package, that are not in a
package themselves.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return {package: filepaths}
if sys.argv[-1] == 'publish':
os.system("python setup.py sdist upload")
args = {'version': get_version(package)}
print("You probably want to also tag the version now:")
print(" git tag -a %(version)s -m 'version %(version)s'" % args)
print(" git push --tags")
sys.exit()
setup(
name=name,
version=get_version(package),
url=url,
license=license,
description=description,
author=author,
author_email=author_email,
packages=get_packages(package),
package_data=get_package_data(package),
install_requires=install_requires,
classifiers=classifiers,
entry_points={'console_scripts': ['slugify=slugify.slugify:main']},
)
|
nschaetti/EchoTorch
|
echotorch/nn/LiESN.py
|
Python
|
gpl-3.0
| 4,533
| 0.004853
|
# -*- coding: utf-8 -*-
#
# File : echotorch/nn/ESN.py
# Description : An Echo State Network module.
# Date : 26th of January, 2018
#
# This file is part of EchoTorch. EchoTorch is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Nils Schaetti <nils.schaetti@unine.ch>
"""
Created on 26 January 2018
@author: Nils Schaetti
"""
import torch
from .LiESNCell import LiESNCell
from .ESN import ESN
# Leaky-Integrated Echo State Network module
class LiESN(ESN):
"""
Leaky-Integrated Echo State Network module
"""
# Constructor
def __init__(self, input_dim, hidden_dim, output_dim, spectral_radius=0.9,
bias_scaling=0, input_scaling=1.0, w=None, w_in=None, w_bias=None, sparsity=None,
input_set=[1.0, -1.0], w_sparsity=None, nonlin_func=torch.tanh, learning_algo='inv', ridge_param=0.0,
leaky_rate=1.0, train_leaky_rate=False, feedbacks=False, wfdb_sparsity=None,
normalize_feedbacks=False, softmax_output=False, seed=None, washout=0, w_distrib='uniform',
win_distrib='uniform', wbias_distrib='uniform', win_normal=(0.0, 1.0), w_normal=(0.0, 1.0),
wbias_normal=(0.0, 1.0), dtype=torch.float32):
"""
Constructor
:param input_dim:
:param hidden_dim:
:param output_dim:
:param spectral_radius:
:param bias_scaling:
:param input_scaling:
:param w:
:param w_in:
:param w_bias:
:param sparsity:
:param input_set:
:param w_sparsity:
:param nonlin_func:
:param learning_algo:
:param ridge_param:
:param leaky_rate:
:param train_leaky_rate:
:param feedbacks:
"""
        super(LiESN, self).__init__(input_dim, hidden_dim, output_dim, spectral_radius=spectral_radius,
bias_scaling=bias_scaling, input_scaling=input_scaling,
w=w, w_in=w_in, w_bias=w_bias, sparsity=sparsity, input_set=input_set,
w_sparsity=w_sparsity, nonlin_func=nonlin_func, learning_algo=learning_algo,
ridge_param=ridge_param, create_cell=False, feedbacks=feedbacks,
wfdb_sparsity=wfdb_sparsity, normalize_feedbacks=normalize_feedbacks,
softmax_output=softmax_output, seed=seed, washout=washout, w_distrib=w_distrib,
win_distrib=win_distrib, wbias_distrib=wbias_distrib, win_normal=win_normal,
w_normal=w_normal, wbias_normal=wbias_normal, dtype=torch.float32)
# Recurrent layer
self.esn_cell = LiESNCell(leaky_rate, train_leaky_rate, input_dim, hidden_dim, spectral_radius=spectral_radius,
bias_scaling=bias_scaling, input_scaling=input_scaling,
w=w, w_in=w_in, w_bias=w_bias, sparsity=sparsity, input_set=input_set,
w_sparsity=w_sparsity, nonlin_func=nonlin_func, feedbacks=feedbacks,
feedbacks_dim=output_dim, wfdb_sparsity=wfdb_sparsity,
normalize_feedbacks=normalize_feedbacks, seed=seed, w_distrib=w_distrib,
win_distrib=win_distrib, wbias_distrib=wbias_distrib, win_normal=win_normal,
w_normal=w_normal, wbias_normal=wbias_normal, dtype=torch.float32)
# end __init__
###############################################
# PROPERTIES
###############################################
###############################################
# PUBLIC
###############################################
###############################################
# PRIVATE
###############################################
# end ESNCell
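# Illustrative instantiation (the dimensions and leaky rate below are arbitrary
# example values, not defaults recommended by the authors):
#
#   esn = LiESN(input_dim=1, hidden_dim=100, output_dim=1, leaky_rate=0.2)
#   # the resulting module is then trained and used like the parent ESN class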
|
mtambos/online-anomaly-detection
|
src/mgng/cdf_table.py
|
Python
|
mit
| 263,594
| 0
|
#!/usr/bin/env python
CDF_TABLE = {
-4.0: 3.1671241833119863e-05,
-3.999: 3.1805340054201978e-05,
-3.998: 3.1939975607705564e-05,
-3.997: 3.2075150511550028e-05,
-3.996: 3.2210866790657701e-05,
-3.995: 3.2347126476975879e-05,
-3.994: 3.2483931609498828e-05,
-3.993: 3.2621284234289769e-05,
-3.992: 3.2759186404502981e-05,
-3.991: 3.2897640180406055e-05,
-3.99: 3.3036647629402356e-05,
-3.989: 3.3176210826053232e-05,
-3.988: 3.3316331852099976e-05,
-3.987: 3.3457012796487145e-05,
-3.986: 3.3598255755384087e-05,
-3.985: 3.3740062832208368e-05,
-3.984: 3.3882436137647765e-05,
-3.983: 3.4025377789683313e-05,
-3.982: 3.4168889913612062e-05,
-3.981: 3.4312974642069613e-05,
-3.98: 3.4457634115053068e-05,
-3.979: 3.4602870479944289e-05,
-3.978: 3.4748685891532589e-05,
-3.977: 3.489508251203793e-05,
-3.976: 3.5042062511133823e-05,
-3.975: 3.5189628065970669e-05,
-3.974: 3.5337781361199108e-05,
-3.973: 3.5486524588993364e-05,
-3.972: 3.5635859949074122e-05,
-3.971: 3.5785789648732663e-05,
-3.97: 3.5936315902853758e-05,
-3.969: 3.6087440933939699e-05,
-3.968: 3.623916697213351e-05,
-3.967: 3.6391496255242999e-05,
-3.966: 3.6544431028764378e-05,
-3.965: 3.6697973545905876e-05,
-3.964: 3.6852126067611854e-05,
-3.963: 3.700689086258668e-05,
-3.962: 3.716227020731895e-05,
-3.961: 3.7318266386104972e-05,
-3.96: 3.7474881691073423e-05,
-3.959: 3.7632118422209472e-05,
-3.958: 3.7789978887378999e-05,
-3.957: 3.7948465402352815e-05,
-3.956: 3.8107580290831086e-05,
-3.955: 3.8267325884467786e-05,
-3.954: 3.8427704522895477e-05,
-3.953: 3.8588718553749661e-05,
-3.952: 3.8750370332693209e-05,
-3.951: 3.8912662223441674e-05,
-3.95: 3.9075596597787456e-05,
-3.949: 3.9239175835625169e-05,
-3.948: 3.940340232497605e-05,
-3.947: 3.9568278462013455e-05,
-3.946: 3.973380665108771e-05,
-3.945: 3.9899989304750919e-05,
-3.944: 4.0066828843782466e-05,
-3.943: 4.0234327697214316e-05,
-3.942: 4.0402488302356283e-05,
-3.941: 4.0571313104821281e-05,
-3.94: 4.0740804558550722e-05,
-3.939: 4.0910965125840093e-05,
-3.938: 4.1081797277364898e-05,
-3.937: 4.125330349220586e-05,
-3.936: 4.1425486257874634e-05,
-3.935: 4.1598348070340024e-05,
-3.934: 4.1771891434053205e-05,
-3.933: 4.1946118861974396e-05,
-3.932: 4.2121032875598015e-05,
-3.931: 4.2296636004979594e-05,
-3.93: 4.2472930788761139e-05,
-3.929: 4.2649919774197972e-05,
-3.928: 4.2827605517184274e-05,
-3.927: 4.300599058228019e-05,
-3.926: 4.3185077542737911e-05,
-3.925: 4.336486898052778e-05,
-3.924: 4.3545367486365296e-05,
-3.923: 4.372657565973768e-05,
-3.922: 4.3908496108930332e-05,
-3.921: 4.4091131451053942e-05,
-3.92: 4.4274484312070743e-05,
-3.919: 4.4458557326821717e-05,
    -3.918: 4.464335313905388e-05,
-3.917: 4.4828874401446823e-05,
-3.916: 4.5015123775639811e-05,
-3.915: 4.5202103932259478e-05,
-3.914: 4.5389817550946242e-05,
-3.913: 4.5578267320382521e-05,
-3.912: 4.576745593831931e-05,
-3.911: 4.5957386111604217e-05,
|
-3.91: 4.6148060556208749e-05,
-3.909: 4.6339481997255854e-05,
-3.908: 4.6531653169047459e-05,
-3.907: 4.6724576815092721e-05,
-3.906: 4.6918255688135439e-05,
-3.905: 4.7112692550181989e-05,
-3.904: 4.7307890172529221e-05,
-3.903: 4.7503851335792433e-05,
-3.902: 4.7700578829933875e-05,
-3.901: 4.7898075454290671e-05,
-3.9: 4.8096344017602614e-05,
-3.899: 4.829538733804146e-05,
-3.898: 4.849520824323816e-05,
-3.897: 4.8695809570312447e-05,
-3.896: 4.8897194165900402e-05,
-3.895: 4.9099364886183916e-05,
-3.894: 4.9302324596918514e-05,
-3.893: 4.950607617346294e-05,
-3.892: 4.9710622500807232e-05,
-3.891: 4.9915966473602184e-05,
-3.89: 5.0122110996188348e-05,
-3.889: 5.032905898262442e-05,
-3.888: 5.053681335671687e-05,
-3.887: 5.0745377052049342e-05,
-3.886: 5.0954753012011497e-05,
-3.885: 5.1164944189828644e-05,
-3.884: 5.1375953548590865e-05,
-3.883: 5.1587784061282594e-05,
-3.882: 5.180043871081255e-05,
-3.881: 5.2013920490043229e-05,
-3.88: 5.2228232401820067e-05,
-3.879: 5.2443377459002123e-05,
-3.878: 5.2659358684491198e-05,
-3.877: 5.2876179111262456e-05,
-3.876: 5.3093841782393697e-05,
-3.875: 5.3312349751096242e-05,
-3.874: 5.3531706080744701e-05,
-3.873: 5.3751913844906934e-05,
-3.872: 5.3972976127375209e-05,
-3.871: 5.4194896022195686e-05,
-3.87: 5.4417676633699595e-05,
-3.869: 5.4641321076533749e-05,
-3.868: 5.4865832475690464e-05,
-3.867: 5.509121396653895e-05,
-3.866: 5.5317468694856288e-05,
-3.865: 5.5544599816857711e-05,
-3.864: 5.5772610499228015e-05,
-3.863: 5.6001503919152081e-05,
-3.862: 5.6231283264346269e-05,
-3.861: 5.6461951733090002e-05,
-3.86: 5.6693512534256533e-05,
-3.859: 5.6925968887344224e-05,
-3.858: 5.715932402250805e-05,
-3.857: 5.7393581180591558e-05,
-3.856: 5.7628743613158088e-05,
-3.855: 5.786481458252196e-05,
-3.854: 5.8101797361781221e-05,
-3.853: 5.8339695234848456e-05,
-3.852: 5.8578511496483678e-05,
-3.851: 5.881824945232506e-05,
-3.85: 5.9058912418922374e-05,
-3.849: 5.9300503723768356e-05,
-3.848: 5.9543026705330624e-05,
-3.847: 5.9786484713084543e-05,
-3.846: 6.0030881107545608e-05,
-3.845: 6.027621926030163e-05,
-3.844: 6.0522502554045546e-05,
-3.843: 6.0769734382607454e-05,
-3.842: 6.1017918150987912e-05,
-3.841: 6.1267057275390561e-05,
-3.84: 6.151715518325519e-05,
-3.839: 6.1768215313289857e-05,
-3.838: 6.2020241115505185e-05,
-3.837: 6.2273236051246087e-05,
-3.836: 6.2527203593226323e-05,
-3.835: 6.2782147225560488e-05,
-3.834: 6.3038070443798447e-05,
-3.833: 6.3294976754958213e-05,
-3.832: 6.3552869677559328e-05,
-3.831: 6.3811752741656661e-05,
-3.83: 6.4071629488874414e-05,
-3.829: 6.4332503472439703e-05,
-3.828: 6.4594378257215601e-05,
-3.827: 6.4857257419736669e-05,
-3.826: 6.5121144548241204e-05,
-3.825: 6.5386043242706847e-05,
-3.824: 6.5651957114884078e-05,
-3.823: 6.5918889788330081e-05,
-3.822: 6.6186844898443749e-05,
-3.821: 6.6455826092500043e-05,
-3.82: 6.6725837029684668e-05,
-3.819: 6.6996881381127554e-05,
-3.818: 6.7268962829939319e-05,
-3.817: 6.7542085071244345e-05,
-3.816: 6.7816251812216955e-05,
-3.815: 6.8091466772115354e-05,
-3.814: 6.8367733682317419e-05,
-3.813: 6.8645056286355626e-05,
-3.812: 6.8923438339951702e-05,
-3.811: 6.9202883611052415e-05,
-3.81: 6.9483395879865021e-05,
-3.809: 6.9764978938892722e-05,
-3.808: 7.0047636592969923e-05,
-3.807: 7.0331372659297682e-05,
-3.806: 7.0616190967479937e-05,
-3.805: 7.0902095359559164e-05,
-3.804: 7.1189089690052422e-05,
-3.803: 7.1477177825986555e-05,
-3.802: 7.176636364693543e-05,
-3.801: 7.2056651045054797e-05,
-3.8: 7.2348043925119976e-05,
-3.799: 7.264054620456058e-05,
-3.798: 7.2934161813498341e-05,
-3.797: 7.3228894694783183e-05,
-3.796: 7.3524748804028924e-05,
-3.795: 7.3821728109651046e-05,
-3.794: 7.4119836592903242e-05,
-3.793: 7.4419078247914218e-05,
-3.792: 7.4719457081723979e-05,
-3.791: 7.5020977114321973e-05,
-3.79: 7.5323642378683189e-05,
-3.789: 7.5627456920806231e-05,
-3.788: 7.593242479975023e-05,
-3.787: 7.6238550087671903e-05,
-3.786: 7.6545836869863155e-05,
-3.785: 7.6854289244789463e-05,
-3.784: 7.7163911324126697e-05,
-3.783: 7.7474707232798608e-05,
-3.782: 7.7786681109015871e-05,
-3.781: 7.8099837104312394e-05,
-3.78: 7.8414179383585065e-05,
-3.779: 7.8729712125130157e-05,
-3.778: 7.9046439520683102e-05,
-3.777: 7.9364365775455817e-05,
-3.776: 7.9683495108174974e-05,
-3.775: 8.0003831751120872e-05,
-3.774: 8.0325379950166369e-05,
-3.773: 8.0648143964
|
homeworkprod/byceps
|
tests/unit/services/orga/test_birthday_service.py
|
Python
|
bsd-3-clause
| 1,509
| 0
|
"""
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from __future__ import annotations
from datetime import date
from freezegun import freeze_time
from byceps.database import generate_uuid
from byceps.services.orga import birthday_service
from byceps.services.orga.transfer.models import Birthday
from byceps.services.user.transfer.models import User
from byceps.typing import UserID
@freeze_time('1994-09-30')
def test_sort():
born1985 = create_user_and_birthday(date(1985, 9, 29))
born1987 = create_user_and_birthday(date(1987, 10, 1))
born1991 = create_user_and_birthday(date(1991, 11, 14))
born1992 = create_user_and_birthday(date(1992, 11, 14))
born1994 = create_user_and_birthday(date(1994, 9, 30))
users_and_birthdays = [
born1994,
born1992,
born1985,
born1991,
born1987,
]
expected = [
born1994,
born1987,
born1991,
born1992,
born1985,
]
actual = list(
birthday_service.sort_users_by_next_birthday(users_and_birthdays)
    )
    assert actual == expected
# helpers
def create_user_and_birthday(date_of_birth: date) -> tuple[User, Birthday]:
user = User(
id=UserID(generate_uuid()),
screen_name=f'born-{date_of_birth}',
suspended=False,
deleted=False,
locale=None,
avatar_url=None,
)
birthday = Birthday(date_of_birth)
return user, birthday
|
wukong-m2m/NanoKong
|
tools/python/scripts/installer.py
|
Python
|
gpl-2.0
| 2,839
| 0.002113
|
#!/usr/bin/env python
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../master'))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../master/wkpf'))
print os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../master/wkpf')
from wkpf.pynvc import *
from wkpf.wkpfcomm import *
comm = getComm()
print "node ids", comm.getNodeIds()
comm.setFeature(2, WKPF_FEATURE_LIGHT_SENSOR, 0)
comm.setFeature(2, WKPF_FEATURE_LIGHT_ACTUATOR, 1)
comm.setFeature(2, WKPF_FEATURE_NUMERIC_CONTROLLER, 0)
comm.setFeature(2, WKPF_FEATURE_NATIVE_THRESHOLD, 0)
comm.setLocation(2, "WuKong")
comm.setFeature(7, WKPF_FEATURE_LIGHT_SENSOR, 1)
comm.setFeature(7, WKPF_FEATURE_LIGHT_ACTUATOR, 1)
comm.setFeature(7, WKPF_FEATURE_NUMERIC_CONTROLLER, 0)
comm.setFeature(7, WKPF_FEATURE_NATIVE_THRESHOLD, 0)
comm.setLocation(7, "WuKong")
comm.setFeature(4, WKPF_FEATURE_LIGHT_SENSOR, 1)
comm.setFeature(4, WKPF_FEATURE_LIGHT_ACTUATOR, 0)
comm.setFeature(4, WKPF_FEATURE_NUMERIC_CONTROLLER, 1)
comm.setFeature(4, WKPF_FEATURE_NATIVE_THRESHOLD, 1)
comm.setLocation(4, "WuKong")
comm.setFeature(5, WKPF_FEATURE_LIGHT_SENSOR, 0)
comm.setFeature(5, WKPF_FEATURE_LIGHT_ACTUATOR, 1)
comm.setFeature(5, WKPF_FEATURE_NUMERIC_CONTROLLER, 0)
comm.setFeature(5, WKPF_FEATURE_NATIVE_THRESHOLD, 0)
comm.setLocation(5, "WuKong")
comm.setFeature(6, WKPF_FEATURE_LIGHT_SENSOR, 0)
comm.setFeature(6, WKPF_FEATURE_LIGHT_ACTUATOR, 1)
comm.setFeature(6, WKPF_FEATURE_NUMERIC_CONTROLLER, 0)
comm.setFeature(6, WKPF_FEATURE_NATIVE_THRESHOLD, 0)
comm.setLocation(6, "WuKong")
comm.setFeature(13, WKPF_FEATURE_LIGHT_SENSOR, 0)
comm.setFeature(13, WKPF_FEATURE_LIGHT_ACTUATOR, 1)
comm.setFeature(13, WKPF_FEATURE_NUMERIC_CONTROLLER, 0)
comm.setFeature(13, WKPF_FEATURE_NATIVE_THRESHOLD, 0)
comm.setLocation(13, "WuKong")
comm.setFeature(14, WKPF_FEATURE_LIGHT_SENSOR, 0)
comm.setFeature(14, WKPF_FEATURE_LIGHT_ACTUATOR, 1)
comm.setFeature(14, WKPF_FEATURE_NUMERIC_CONTROLLER, 0)
comm.setFeature(14, WKPF_FEATURE_NATIVE_THRESHOLD, 0)
comm.setLocation(14, "WuKong")
comm.setFeature(15, WKPF_FEATURE_LIGHT_SENSOR, 0)
comm.setFeature(15, WKPF_FEATURE_LIGHT_ACTUATOR, 1)
comm.setFeature(15, WKPF_FEATURE_NUMERIC_CONTROLLER, 0)
comm.setFeature(15, WKPF_FEATURE_NATIVE_THRESHOLD, 0)
comm.setLocation(15, "WuKong")
comm.setFeature(10, WKPF_FEATURE_LIGHT_SENSOR, 0)
comm.setFeature(10, WKPF_FEATURE_LIGHT_ACTUATOR, 1)
comm.setFeature(10, WKPF_FEATURE_NUMERIC_CONTROLLER, 0)
comm.setFeature(10, WKPF_FEATURE_NATIVE_THRESHOLD, 0)
comm.setLocation(10, "WuKong")
comm.setFeature(12, WKPF_FEATURE_LIGHT_SENSOR, 0)
comm.setFeature(12, WKPF_FEATURE_LIGHT_ACTUATOR, 1)
comm.setFeature(12, WKPF_FEATURE_NUMERIC_CONTROLLER, 0)
comm.setFeature(12, WKPF_FEATURE_NATIVE_THRESHOLD, 0)
comm.setLocation(12, "WuKong")
|
greyside/errand-boy
|
tests/test_mock_transport.py
|
Python
|
bsd-3-clause
| 2,753
| 0.001816
|
import subprocess
import errand_boy
from errand_boy.exceptions import SessionClosedError
from errand_boy.transports import base, mock as mock_transport
from .base import mock, BaseTestCase
from .data import get_command_data
class MockTransportSimTestCase(BaseTestCase):
def test_run_cmd(self):
transport = mock_transport.MockTransport()
with self.multiprocessing_patcher as multiprocessing,\
self.subprocess_patcher as mock_subprocess:
mock_subprocess.PIPE = subprocess.PIPE
            cmd, stdout, stderr, returncode, requests, responses = get_command_data('ls -al')
process = mock.Mock()
process.communicate.return_value = stdout, stderr
process.returncode = returncode
mock_subprocess.Popen.return_value = process
mock_Pool = mock.Mock()
mock_Pool.apply_async.side_effect = lambda f, args=(), kwargs={}: f(*args, **kwargs)
multiprocessing.Pool.return_value = mock_Pool
result = transport.run_cmd(cmd)
self.assertEqual(result[0], stdout)
self.assertEqual(result[1], stderr)
self.assertEqual(result[2], returncode)
self.assertEqual(mock_subprocess.Popen.call_count, 1)
self.assertEqual(mock_subprocess.Popen.call_args_list[0][0][0], cmd)
def test_session(self):
transport = mock_transport.MockTransport()
with self.multiprocessing_patcher as multiprocessing,\
self.subprocess_patcher as mock_subprocess:
mock_subprocess.PIPE = subprocess.PIPE
cmd, stdout, stderr, returncode, requests, responses = get_command_data('ls -al')
process = mock.Mock()
process.communicate.return_value = stdout, stderr
process.returncode = returncode
mock_subprocess.Popen.return_value = process
mock_Pool = mock.Mock()
mock_Pool.apply_async.side_effect = lambda f, args=(), kwargs={}: f(*args, **kwargs)
multiprocessing.Pool.return_value = mock_Pool
with transport.get_session() as session:
foo = session.subprocess
process = foo.Popen(cmd, shell=True, stdout=foo.PIPE, stderr=foo.PIPE)
res_stdout, res_stderr = process.communicate()
res_returncode = process.returncode
with self.assertRaises(SessionClosedError):
process.returncode
self.assertEqual(res_stdout, stdout)
self.assertEqual(res_stderr, stderr)
self.assertEqual(res_returncode, returncode)
self.assertEqual(mock_subprocess.Popen.call_count, 1)
self.assertEqual(mock_subprocess.Popen.call_args_list[0][0][0], cmd)
|
fluxer/spm
|
nuitka/nuitka/nodes/BuiltinTypeNodes.py
|
Python
|
gpl-2.0
| 10,942
| 0.009048
|
# Copyright 2016, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Built-in type nodes tuple/list/float/int etc.
These are all very simple and have predictable properties, because we know their type and
that should allow some important optimizations.
"""
from nuitka.nodes.ConstantRefNodes import makeConstantRefNode
from nuitka.optimizations import BuiltinOptimization
from nuitka.PythonVersions import python_version
from .NodeBases import (
ChildrenHavingMixin,
ExpressionBuiltinSingleArgBase,
ExpressionChildrenHavingBase,
ExpressionSpecBasedComputationMixin,
NodeBase
)
from .NodeMakingHelpers import (
makeConstantReplacementNode,
wrapExpressionWithNodeSideEffects
)
class ExpressionBuiltinTypeBase(ExpressionBuiltinSingleArgBase):
pass
class ExpressionBuiltinContainerBase(ExpressionChildrenHavingBase,
ExpressionSpecBasedComputationMixin):
builtin_spec = None
named_children = (
"value",
)
def __init__(self, value, source_ref):
ExpressionChildrenHavingBase.__init__(
self,
values = {
"value" : value,
},
source_ref = source_ref
)
getValue = ExpressionChildrenHavingBase.childGetter(
"value"
)
def computeExpression(self, trace_collection):
value = self.getValue()
if value is None:
return self.computeBuiltinSpec(
trace_collection = trace_collection,
given_values = ()
)
elif value.isExpressionConstantXrangeRef():
if value.getIterationLength() <= 256:
return self.computeBuiltinSpec(
trace_collection = trace_collection,
given_values = (value,)
)
else:
return self, None, None
else:
return self.computeBuiltinSpec(
trace_collection = trace_collection,
given_values = (value,)
)
class ExpressionBuiltinTuple(ExpressionBuiltinContainerBase):
kind = "EXPRESSION_BUILTIN_TUPLE"
builtin_spec = BuiltinOptimization.builtin_tuple_spec
class ExpressionBuiltinList(ExpressionBuiltinContainerBase):
kind = "EXPRESSION_BUILTIN_LIST"
builtin_spec = BuiltinOptimization.builtin_list_spec
class ExpressionBuiltinSet(ExpressionBuiltinContainerBase):
kind = "EXPRESSION_BUILTIN_SET"
builtin_spec = BuiltinOptimization.builtin_set_spec
class ExpressionBuiltinFloat(ExpressionBuiltinTypeBase):
kind = "EXPRESSION_BUILTIN_FLOAT"
builtin_spec = BuiltinOptimization.builtin_float_spec
class ExpressionBuiltinBool(ExpressionBuiltinTypeBase):
kind = "EXPRESSION_BUILTIN_BOOL"
builtin_spec = BuiltinOptimization.builtin_bool_spec
def computeExpression(self, trace_collection):
value = self.getValue()
if value is not None:
truth_value = self.getValue().getTruthValue()
if truth_value is not None:
result = wrapExpressionWithNodeSideEffects(
new_node = makeConstantReplacementNode(
constant = truth_value,
node = self,
),
old_node = self.getValue()
)
return (
result,
"new_constant",
"Pre
|
dicted truth value of built-in bool argument"
)
return ExpressionB
|
uiltinTypeBase.computeExpression(self, trace_collection)
class ExpressionBuiltinIntLongBase(ChildrenHavingMixin, NodeBase,
ExpressionSpecBasedComputationMixin):
named_children = ("value", "base")
# Note: Version specific, may be allowed or not.
try:
int(base = 2)
except TypeError:
base_only_value = False
else:
base_only_value = True
# To be overloaded by child classes with int/long.
builtin = int
def __init__(self, value, base, source_ref):
NodeBase.__init__(self, source_ref = source_ref)
if value is None and self.base_only_value:
value = makeConstantReplacementNode(
constant = '0',
node = self
)
ChildrenHavingMixin.__init__(
self,
values = {
"value" : value,
"base" : base
}
)
getValue = ChildrenHavingMixin.childGetter("value")
getBase = ChildrenHavingMixin.childGetter("base")
def computeExpression(self, trace_collection):
value = self.getValue()
base = self.getBase()
given_values = []
if value is None:
if base is not None:
if not self.base_only_value:
return trace_collection.getCompileTimeComputationResult(
node = self,
computation = lambda : self.builtin(base = 2),
description = """\
%s built-in call with only base argument""" % self.builtin.__name__
)
given_values = ()
elif base is None:
given_values = (value,)
else:
given_values = (value, base)
return self.computeBuiltinSpec(
trace_collection = trace_collection,
given_values = given_values
)
class ExpressionBuiltinInt(ExpressionBuiltinIntLongBase):
kind = "EXPRESSION_BUILTIN_INT"
builtin_spec = BuiltinOptimization.builtin_int_spec
builtin = int
class ExpressionBuiltinUnicodeBase(ChildrenHavingMixin, NodeBase,
ExpressionSpecBasedComputationMixin):
named_children = (
"value",
"encoding",
"errors"
)
def __init__(self, value, encoding, errors, source_ref):
NodeBase.__init__(
self,
source_ref = source_ref
)
ChildrenHavingMixin.__init__(
self,
values = {
"value" : value,
"encoding" : encoding,
"errors" : errors
}
)
getValue = ChildrenHavingMixin.childGetter("value")
getEncoding = ChildrenHavingMixin.childGetter("encoding")
getErrors = ChildrenHavingMixin.childGetter("errors")
def computeExpression(self, trace_collection):
args = [
self.getValue(),
self.getEncoding(),
self.getErrors()
]
while args and args[-1] is None:
del args[-1]
for arg in args:
# The value of that node escapes and could change its contents.
trace_collection.removeKnowledge(arg)
# Any code could be run, note that.
trace_collection.onControlFlowEscape(self)
return self.computeBuiltinSpec(
trace_collection = trace_collection,
given_values = tuple(args)
)
if python_version < 300:
class ExpressionBuiltinStr(ExpressionBuiltinTypeBase):
kind = "EXPRESSION_BUILTIN_STR"
builtin_spec = BuiltinOptimization.builtin_str_spec
def computeExpression(self, trace_collection):
new_node, change_tags, change_desc = ExpressionBuiltinTypeBase.computeExpression(
self,
trace_collection
)
|
kuroneko1996/cyberlab
|
spritesheet.py
|
Python
|
mit
| 869
| 0.002301
|
import pygame as pg
class Spritesheet:
def __init__(self, filename, tile_size):
self.sheet = pg.image.load(filename).convert_alpha()
self.tile_size = tile_size
def get_image(self, x, y, width, height):
image = pg.Surface((width, height))
        image.blit(self.sheet, (0, 0), (x, y, width, height))
return image
def get_image_alpha(self, x, y, width, height):
image = pg.Surface((width, height), pg.SRCALPHA)
image.blit(self.sheet, (0, 0), (x, y, width, height))
return image
def get_image_at_col_row(self, col, row):
return self.get_image(col * self.tile_size, row * self.tile_size, self.tile_size, self.tile_size)
def get_image_alpha_at_col_row(self, col, row):
return self.get_image_alpha(col * self.tile_size, row * self.tile_size, self.tile_size, self.tile_size)
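# Illustrative usage (the file name and tile size are assumptions; pygame needs
# an initialised display before convert_alpha() in the constructor will work):
#
#   pg.init()
#   screen = pg.display.set_mode((640, 480))
#   sheet = Spritesheet('tiles.png', 32)
#   wall = sheet.get_image_alpha_at_col_row(3, 0)
#   screen.blit(wall, (0, 0))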
|
googleapis/python-recommendations-ai
|
google/cloud/recommendationengine_v1beta1/services/prediction_service/transports/grpc.py
|
Python
|
apache-2.0
| 11,847
| 0.001857
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.recommendationengine_v1beta1.types import prediction_service
from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO
class PredictionServiceGrpcTransport(PredictionServiceTransport):
"""gRPC backend transport for PredictionService.
Service for making recommendation prediction.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "recommendationengine.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
|
hcosta/escueladevideojuegos.net-backend-django
|
edv/reddit/migrations/0010_question_best_response.py
|
Python
|
gpl-3.0
| 628
| 0.001592
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-27 16:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('reddit', '0009_auto_20170527_1814'),
]
operations = [
migrations.AddField(
model_name='question',
name='best_response',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletio
|
n.CASCADE, related_name='best_response', to='reddit.Response', verbose_name='Mejor Respuesta'),
),
]
|
ianstalk/Flexget
|
setup.py
|
Python
|
mit
| 2,280
| 0.000877
|
import sys
from pathlib import Path
from setuptools import find_packages, setup
long_description = Path('README.rst').read_text()
# Populates __version__ without importing the package
__version__ = None
with open('flexget/_version.py', encoding='utf-8') as ver_file:
exec(ver_file.read()) # pylint: disable=W0122
if not __version__:
print('Could not find __version__ from flexget/_version.py')
sys.exit(1)
def load_requirements(filename):
return [
line.strip()
for line in Path(filename).read_text().splitlines()
if not line.startswith('#')
]
setup(
name='FlexGet',
version=__version__,
description='FlexGet is a program aimed to automate downloading or processing content (torrents, podcasts, etc.) '
'from different sources like RSS-feeds, html-pages, various sites and more.',
long_description=long_description,
long_description_content_type='text/x-rst',
author='Marko Koivusalo',
author_email='marko.koivusalo@gmail.com',
license='MIT',
url='https://flexget.com',
project_urls={
'Repository': 'https://github.com/Flexget/Flexget
|
',
'Issue Tracker': 'https://github.com/Flexget/Flexget/issues',
'Forum': 'https://discuss.flexget.com',
},
packages=find_packages(exclude=['flexget.tests']),
include_package_data=True,
zip_safe=False,
install_requires=load_requirements('requirements.txt'),
tests_require=['pytest'],
extras_require={'dev': load_requirements('dev-requirements.txt')},
|
entry_points={
'console_scripts': ['flexget = flexget:main'],
'gui_scripts': [
'flexget-headless = flexget:main'
], # This is useful on Windows to avoid a cmd popup
},
python_requires='>=3.6',
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
],
)
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/httpretty/__init__.py
|
Python
|
agpl-3.0
| 1,914
| 0
|
# #!/usr/bin/env python
# -*- coding: utf-8 -*-
# <HTTPretty - HTTP client mock for Python>
# Copyright (C) <2011-2013> Gabriel Falcão <gabriel@nacaolivre.org>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFT
|
WARE.
from __future__ import unicode_literals
__version__ = version = '0.8.3'
from .core impo
|
rt httpretty, httprettified
from .errors import HTTPrettyError
from .core import URIInfo
HTTPretty = httpretty
activate = httprettified
enable = httpretty.enable
register_uri = httpretty.register_uri
disable = httpretty.disable
is_enabled = httpretty.is_enabled
reset = httpretty.reset
Response = httpretty.Response
GET = httpretty.GET
PUT = httpretty.PUT
POST = httpretty.POST
DELETE = httpretty.DELETE
HEAD = httpretty.HEAD
PATCH = httpretty.PATCH
OPTIONS = httpretty.OPTIONS
CONNECT = httpretty.CONNECT
def last_request():
"""returns the last request"""
return httpretty.last_request
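# Hedged usage sketch of the aliases exported above. It assumes the third-party
# ``requests`` package is installed; any HTTP client built on the standard
# socket machinery would behave the same way.
if __name__ == "__main__":
    import requests

    @activate
    def fetch_mocked():
        register_uri(GET, "http://example.com/", body="hello world")
        response = requests.get("http://example.com/")
        print(response.text)            # -> "hello world"
        print(last_request().method)    # -> "GET"

    fetch_mocked()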
|
fatherlinux/atomic-reactor
|
tests/plugins/test_check_and_set_rebuild.py
|
Python
|
bsd-3-clause
| 4,173
| 0.001198
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals
import pytest
from atomic_reactor.core import DockerTasker
from atomic_reactor.inner import DockerBuildWorkflow
from atomic_reactor.plugin import PreBuildPluginsRunner, PluginFailedException
from atomic_reactor.plugins.pre_check_and_set_rebuild import (is_rebuild,
CheckAndSetRebuildPlugin)
from atomic_reactor.util import ImageName
from tests.constants import SOURCE
import json
import os
from osbs.api import OSBS
from flexmock import flexmock
class X(object):
pass
def prepare(key, value, set_labels_args=None, set_labels_kwargs=None):
tasker = DockerTasker()
workflow = DockerBuildWorkflow(SOURCE, "test-image")
setattr(workflow, 'builder', X())
setattr(workflow.builder, 'image_id', 'asd123')
setattr(workflow.builder, 'base_image', ImageName(repo='Fedora', tag='22'))
setattr(workflow.builder, 'source', X())
setattr(workflow.builder.source, 'path', '/tmp')
setattr(workflow.builder.source, 'dockerfile_path', None)
# No-op implementation until implemented in osbs-client
flexmock(OSBS)
setattr(OSBS, 'set_labels_on_build_config', lambda **kwargs: None)
expectation = flexmock(OSBS).should_receive('set_labels_on_build_config')
if set_labels_args is not None:
if set_labels_kwargs is None:
set_labels_kwargs = {}
expectation.with_args(*set_labels_args, **set_labels_kwargs)
runner = PreBuildPluginsRunner(tasker, workflow,
[
{
'name': CheckAndSetRebuildPlugin.key,
'args': {
'label_key': key,
'la
|
bel_value': value,
'url': '',
},
}
|
])
return workflow, runner
def test_check_rebuild_no_build_json():
workflow, runner = prepare('is_autorebuild', 'true')
if "BUILD" in os.environ:
del os.environ["BUILD"]
with pytest.raises(PluginFailedException):
runner.run()
def test_check_no_buildconfig():
key = 'is_autorebuild'
value = 'true'
workflow, runner = prepare(key, value)
os.environ["BUILD"] = json.dumps({
"metadata": {
"labels": {
key: value,
}
}
})
# No buildconfig in metadata
with pytest.raises(PluginFailedException):
runner.run()
@pytest.mark.parametrize(('namespace'), [None, 'my_namespace'])
def test_check_is_not_rebuild(namespace):
key = 'is_autorebuild'
value = 'true'
buildconfig = "buildconfig1"
namespace_dict = {}
if namespace is not None:
namespace_dict["namespace"] = namespace
workflow, runner = prepare(key, value,
set_labels_args=(buildconfig, {key: value}),
set_labels_kwargs=namespace_dict)
build_json = {
"metadata": {
"labels": {
"buildconfig": buildconfig,
key: "false",
}
}
}
build_json["metadata"].update(namespace_dict)
os.environ["BUILD"] = json.dumps(build_json)
runner.run()
assert workflow.prebuild_results[CheckAndSetRebuildPlugin.key] == False
assert not is_rebuild(workflow)
def test_check_is_rebuild():
key = 'is_autorebuild'
value = 'true'
workflow, runner = prepare(key, value)
os.environ["BUILD"] = json.dumps({
"metadata": {
"labels": {
"buildconfig": "buildconfig1",
key: value,
}
}
})
runner.run()
assert workflow.prebuild_results[CheckAndSetRebuildPlugin.key] == True
assert is_rebuild(workflow)
|
GeoMop/GeoMop
|
src/Analysis/pipeline/generator_actions.py
|
Python
|
gpl-3.0
| 11,140
| 0.006463
|
from .action_types import GeneratorActionType, ActionStateType
from .data_types_tree import Ensemble, Struct, Float, DTT
import copy
from .code_formater import Formater
class VariableGenerator(GeneratorActionType):
name = "VariableGenerator"
"""Display name of action"""
description = "Generator for creating static DTT variable"
"""Display description of action"""
def __init__(self, **kwargs):
"""
:param DTT Variable: Dictionary that describes the struct parameters
"""
super(VariableGenerator, self).__init__(**kwargs)
def _inicialize(self):
"""inicialize action run variables"""
if self._get_state().value > ActionStateType.created.value:
return
self._output = self._get_valid_output()
self._set_state(ActionStateType.initialized)
self._hash.update(bytes(self.__class__.__name__, "utf-8"))
if "Variable" in self._variables and \
isinstance(self._variables["Variable"], DTT) and \
self._variables["Variable"]._is_set():
self._hash.update(bytes(self._variables['Variable']._get_unique_text(True), "utf-8"))
def _get_valid_output(self):
"""Construct output from set items"""
if "Variable" in self._variables and isinstance(self._variables["Variable"], DTT):
return self._variables["Variable"]
def _get_variables_script(self):
"""return array of variables python scripts"""
var = super(VariableGenerator, self)._get_variables_script()
variable=Formater.format_variable(
"Variable", self._variables["Variable"]._get_settings_script(), 0)
if len(variable)>0 and len(variable[-1])>0:
variable[-1] = variable[-1][:-1]
var.append(variable)
return var
def _check_params(self):
"""check if all require params is set"""
err = super(VariableGenerator, self)._check_params()
if "Variable" not in self._variables:
self._add_error(err, "Variable parameter is required")
elif not isinstance(self._variables["Variable"], DTT):
self._add_error(err, "Parameter 'Variable' is not valid DTT variable")
else:
if not self._variables["Variable"]._is_set():
self._add_error(err, "Variable in variable generator must be set")
if self._output is None:
self._add_error(err,
|
"Can't determine valid output")
return err
def validate(self):
"""validate variables, inp
|
ut and output"""
err = super(VariableGenerator, self).validate()
return err
class RangeGenerator(GeneratorActionType):
name = "RangeGenerator"
"""Display name of action"""
description = "Generator for generation parallel list"
"""Display description of action"""
def __init__(self, **kwargs):
"""
:param Dictionary Items: Dictionary that describes how to generate
the result. Values have these attributes::
- :name(string): require variable name
- :value(float): require variable middle value
- :step(float): default 1, step for generation
- :n_plus(integer): default 1, amount of plus steps
- :n_minus(integer): default 1, amount of minus steps
- :exponential(bool): default False, if true value is processed exponencially
:param AllCases bool: Cartesian product, default value False:
"""
super(RangeGenerator, self).__init__(**kwargs)
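# Illustrative sketch (an assumption based on the docstring above, not taken
# from the GeoMop sources): a hypothetical parameter set for this generator.
#
# gen = RangeGenerator(
#     Items=[
#         {'name': 'x', 'value': 1.0, 'step': 0.5, 'n_plus': 2, 'n_minus': 1},
#         {'name': 'y', 'value': 10.0, 'exponential': True},
#     ],
#     AllCases=True,
# )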
def _inicialize(self):
"""inicialize action run variables"""
if self._get_state().value > ActionStateType.created.value:
return
self._output = self.__get_output_from_items()
template = copy.deepcopy(self._output.subtype)
# first is middle
self._output.add_item(template)
for item in self._variables['Items']:
if not isinstance(item, dict):
continue
if 'name' in item and item['name'] in template:
if 'value' in item:
setattr(template, item['name'], item['value'])
# Output computation is done at initialization time, so the count of cycles
# is known for statistics (simplifies the get_statistics function)
for item in self._variables['Items']:
if 'AllCases' in self._variables and self._variables['AllCases']:
ready = copy.deepcopy(self._output)
for template_i in ready:
self._generate_step(template_i, item)
else:
self._generate_step(template, item)
self._set_state(ActionStateType.initialized)
self._hash.update(bytes(self.__class__.__name__, "utf-8"))
self._hash.update(bytes(self._output._get_unique_text(True), "utf-8"))
def __get_output_from_items(self):
"""Construct output from set items"""
params = {}
if 'Items' in self._variables:
if isinstance(self._variables['Items'], list):
for item in self._variables['Items']:
if isinstance(item, dict):
if 'name' in item:
try:
params[item['name']] = Float()
except:
pass
if len(params)>1:
return Ensemble(Struct(params))
return None
def _get_variables_script(self):
"""return array of variables python scripts"""
var = super(RangeGenerator, self)._get_variables_script()
i=1
items=['Items = [']
for item in self._variables['Items']:
if not 'name' in item or not 'value' in item:
continue
items.append(" {0}'name':'{1}'".format('{', item['name']))
if 'value' in item:
items[i] += (", 'value':{0}".format(str(item['value'])))
if 'step' in item:
items[i] += (", 'step':{0}".format(str(item['step'])))
if 'n_plus' in item:
items[i] += (", 'n_plus':{0}".format(str(item['n_plus'])))
if 'n_minus' in item:
items[i] += (", 'n_minus':{0}".format(str(item['n_minus'])))
if 'exponential' in item and item['exponential']:
items[i] += (", 'exponential':True")
items[i] += "},"
i += 1
if i>1:
items[i-1]=items[i-1][:-1]
items.append(']')
var.append(items)
if 'AllCases' in self._variables and self._variables["AllCases"]:
var.append(["AllCases=True"])
return var
def _generate_step(self, template, item):
"""generate plus and minus variants for one item"""
plus = 1
if 'n_plus' in item:
plus = item['n_plus']
minus = 1
if 'n_minus' in item:
minus = item['n_minus']
step = 1
if 'step' in item:
step = item['step']
for i in range(0, plus):
template2 =copy.deepcopy(template)
rstep = (i+1)*step
if 'exponential' in item and item['exponential']:
rstep = 2**i*step
setattr(template2, item['name'],
getattr(template2, item['name']).value+rstep)
self._output.add_item(template2)
for i in range(0, minus):
template2 =copy.deepcopy(template)
rstep = (i+1)*step
if 'exponential' in item and item['exponential']:
rstep = 2**i*step
setattr(template2, item['name'],
getattr(template2, item['name']).value-rstep)
self._output.add_item(template2)
def _check_params(self):
"""check if all require params is set"""
err = super(RangeGenerator, self)._check_params()
if self._output is None:
se
|
jfriedly/rethinkdb
|
scripts/nightly-test/launch_nightly_test.py
|
Python
|
agpl-3.0
| 1,755
| 0.015954
|
#!/usr/bin/env python
# Copyright 2010-2012 RethinkDB, all rights reserved.
#
|
Usage: ./launch_nightly_test.py
# --test-host <hostname>[:<port>]
# (--email <name>@<address>)*
# [--title "<Title>"]
# [-- <flags for full_test_driver.py>]
import sys, subprocess, os, optparse
if __name__ != "__main__":
raise ImportError("It doesn't make any sense to import this as a module")
parser = optparse.OptionParser()
parser.add_option("--test-host", action = "store", dest = "test_host")
parser.ad
|
d_option("--email", action = "append", dest = "emailees")
parser.add_option("--title", action = "store", dest = "title")
parser.set_defaults(title = "Nightly test", emailees = [])
(options, args) = parser.parse_args()
if options.test_host is None:
parser.error("You must specify --test-host.")
def escape(arg):
return "'" + arg.replace("'", "'\''") + "'"
tar_proc = subprocess.Popen(
["tar", "--create", "--gzip", "--file=-", "-C", os.path.dirname(__file__), "--"] +
["full_test_driver.py", "remotely.py", "simple_linear_db.py", "renderer"],
stdout = subprocess.PIPE
)
try:
command = "SLURM_CONF=/home/teapot/slurm/slurm.conf ./full_test_driver.py %s >output.txt 2>&1" % " ".join(escape(x) for x in args)
curl_cmd_line = ["curl", "-f", "-X", "POST", "http://%s/spawn/" % options.test_host]
curl_cmd_line += ["-F", "tarball=@-"]
curl_cmd_line += ["-F", "command=%s" % command]
curl_cmd_line += ["-F", "title=%s" % options.title]
for emailee in options.emailees:
curl_cmd_line += ["-F", "emailee=%s" % emailee]
subprocess.check_call(curl_cmd_line, stdin = tar_proc.stdout)
finally:
try:
tar_proc.terminate()
except IOError:
pass
|
Tunous/StringSheet
|
setup.py
|
Python
|
mit
| 977
| 0.001024
|
import os
from io import open
from setuptools import setup
about = {}
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'stringsheet', '__init__.py'), encoding='utf-8') as f:
|
for line in f:
if line.startsw
|
ith('__'):
(key, value) = line.split('=')
about[key.strip()] = value.strip().strip('\'')
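# Illustrative note (an assumption, not taken from the project sources): the
# loop above expects ``stringsheet/__init__.py`` to contain simple dunder
# assignments such as the following, which are split on '=' and stripped of
# surrounding single quotes:
#
# __title__ = 'stringsheet'
# __version__ = '0.1.0'
# __description__ = 'Manage Android string translations via Google Sheets'
# __author__ = 'Author Name'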
with open('README.rst', encoding='utf-8') as f:
readme = f.read()
setup(
name=about['__title__'],
version=about['__version__'],
description=about['__description__'],
long_description=readme,
author=about['__author__'],
author_email=about['__author_email__'],
url=about['__url__'],
license=about['__license__'],
packages=['stringsheet'],
install_requires=[
'httplib2',
'apiclient',
'lxml',
'google-api-python-client'
],
entry_points={
'console_scripts': [
'stringsheet = stringsheet.cli:main'
]
}
)
|
paulydboy/Quad-Vision
|
DroneControl/Camera.py
|
Python
|
apache-2.0
| 177
| 0.00565
|
from Sensor import Sensor
i
|
mport cv2
class Camera(Sensor):
def __init__(self):
self._cap = cv2.VideoCapture(0)
def read(self):
r
|
eturn self._cap.read()
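# Hedged usage sketch: grab a single frame from the default webcam through the
# wrapper above. ``read()`` simply forwards cv2.VideoCapture.read(), which
# returns an (ok, frame) tuple.
if __name__ == "__main__":
    camera = Camera()
    ok, frame = camera.read()
    print("frame captured:", ok)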
|
Videoclases/videoclases
|
videoclases/migrations/0014_auto_20150726_1441.py
|
Python
|
gpl-3.0
| 975
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('videoclases', '0013_tarea_profesor'),
]
|
operations = [
migrations.AddField(
model_name='grupo',
name='alternativa_2',
field=models.CharField(max_length=100, null=True, blank=True),
),
migrations.AddField(
model_name='grupo',
name='alternativa_3',
field
|
=models.CharField(max_length=100, null=True, blank=True),
),
migrations.AddField(
model_name='grupo',
name='alternativa_correcta',
field=models.CharField(max_length=100, null=True, blank=True),
),
migrations.AddField(
model_name='grupo',
name='pregunta',
field=models.CharField(max_length=100, null=True, blank=True),
),
]
|
gepuro/csvkit
|
csvkit/utilities/csvsql.py
|
Python
|
mit
| 7,388
| 0.006091
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import sys
from csvkit import sql
from csvkit import table
from csvkit import CSVKitWriter
from csvkit.cli import CSVKitUtility
class CSVSQL(CSVKitUtility):
description = 'Generate SQL statements for one or more CSV files, optionally execute those statements directly on a database, and execute one or more SQL queries.'
override_flags = ['l', 'f']
def add_arguments(self):
self.argparser.add_argument(metavar="FILE", nargs='*', dest='input_paths', default=['-'],
help='The CSV file(s) to operate on. If omitted, will accept input on STDIN.')
self.argparser.add_argument('-y', '--snifflimit', dest='snifflimit', type=int,
help='Limit CSV dialect sniffing to the specified number of bytes. Specify "0" to disable sniffing entirely.')
self.argparser.add_argument('-i', '--dialect', dest='dialect', choices=sql.DIALECTS,
help='Dialect of SQL to generate. Only valid when --db is not specified.')
self.argparser.add_argument('--db', dest='connection_string',
help
|
='If present, a sqlalchemy connection string to use to directly execute generated SQL on a database.')
self.argparser.add_argument('--query', default=None,
help='Execute one or more SQL queries delimited by ";" and output the result of the last query as CSV.')
self.argparser.add_argument('--insert', dest='insert', action='store_true',
help='In addition to creating the table, also insert the data into the table. Only valid when --db is specified.')
|
self.argparser.add_argument('--tables', dest='table_names',
help='Specify one or more names for the tables to be created. If omitted, the filename (minus extension) or "stdin" will be used.')
self.argparser.add_argument('--no-constraints', dest='no_constraints', action='store_true',
help='Generate a schema without length limits or null checks. Useful when sampling big tables.')
self.argparser.add_argument('--no-create', dest='no_create', action='store_true',
help='Skip creating a table. Only valid when --insert is specified.')
self.argparser.add_argument('--blanks', dest='blanks', action='store_true',
help='Do not coerce empty strings to NULL values.')
self.argparser.add_argument('--no-inference', dest='no_inference', action='store_true',
help='Disable type inference when parsing the input.')
self.argparser.add_argument('--db-schema', dest='db_schema',
help='Optional name of database schema to create table(s) in.')
def main(self):
connection_string = self.args.connection_string
do_insert = self.args.insert
query = self.args.query
self.input_files = []
for path in self.args.input_paths:
self.input_files.append(self._open_input_file(path))
if self.args.table_names:
table_names = self.args.table_names.split(',')
else:
table_names = []
# If one or more filenames are specified, we need to add stdin ourselves (if available)
if sys.stdin not in self.input_files:
try:
if not sys.stdin.isatty():
self.input_files.insert(0, open("/dev/stdin", "r", encoding="utf-8"))
except:
pass
# Create an SQLite database in memory if no connection string is specified
if query and not connection_string:
connection_string = "sqlite:///:memory:"
do_insert = True
if self.args.dialect and connection_string:
self.argparser.error('The --dialect option is only valid when --db is not specified.')
if do_insert and not connection_string:
self.argparser.error('The --insert option is only valid when --db is also specified.')
if self.args.no_create and not do_insert:
self.argparser.error('The --no-create option is only valid when --insert is also specified.')
# Establish database validity before reading CSV files
if connection_string:
try:
engine, metadata = sql.get_connection(connection_string)
except ImportError:
raise ImportError('You don\'t appear to have the necessary database backend installed for the connection string you\'re trying to use. Available backends include:\n\nPostgresql:\tpip install psycopg2\nMySQL:\t\tpip install MySQL-python\n\nFor details on connection strings and other backends, please see the SQLAlchemy documentation on dialects at: \n\nhttp://www.sqlalchemy.org/docs/dialects/\n\n')
conn = engine.connect()
trans = conn.begin()
for f in self.input_files:
try:
# Try to use name specified via --table
table_name = table_names.pop(0)
except IndexError:
if f == sys.stdin:
table_name = "stdin"
else:
# Use filename as table name
table_name = os.path.splitext(os.path.split(f.name)[1])[0]
csv_table = table.Table.from_csv(
f,
name=table_name,
snifflimit=self.args.snifflimit,
blanks_as_nulls=(not self.args.blanks),
infer_types=(not self.args.no_inference),
no_header_row=self.args.no_header_row,
**self.reader_kwargs
)
f.close()
if connection_string:
sql_table = sql.make_table(
csv_table,
table_name,
self.args.no_constraints,
self.args.db_schema,
metadata
)
# Create table
if not self.args.no_create:
sql_table.create()
# Insert data
if do_insert and csv_table.count_rows() > 0:
insert = sql_table.insert()
headers = csv_table.headers()
conn.execute(insert, [dict(zip(headers, row)) for row in csv_table.to_rows()])
# Output SQL statements
else:
sql_table = sql.make_table(csv_table, table_name, self.args.no_constraints)
self.output_file.write('%s\n' % sql.make_create_table_statement(sql_table, dialect=self.args.dialect))
if connection_string:
if query:
# Execute specified SQL queries
queries = query.split(';')
rows = None
for q in queries:
if q:
rows = conn.execute(q)
# Output result of last query as CSV
try:
output = CSVKitWriter(self.output_file, **self.writer_kwargs)
if not self.args.no_header_row:
output.writerow(rows._metadata.keys)
for row in rows:
output.writerow(row)
except AttributeError:
pass
trans.commit()
conn.close()
def launch_new_instance():
utility = CSVSQL()
utility.main()
if __name__ == "__main__":
launch_new_instance()
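# Hedged usage sketch (command lines and file names are illustrative, not taken
# from the project docs). Each invocation relies only on options defined above:
#
#   csvsql --dialect sqlite data.csv
#       -> print a CREATE TABLE statement for data.csv
#   csvsql --db sqlite:///data.db --insert data.csv
#       -> create the table on the database and insert the rows
#   csvsql --query "SELECT COUNT(*) FROM data" data.csv
#       -> load data.csv into an in-memory SQLite database, run the query,
#          and write the result as CSV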
|
hardbyte/python-can
|
can/interfaces/pcan/pcan.py
|
Python
|
lgpl-3.0
| 21,138
| 0.001277
|
"""
Enable basic CAN over a PCAN USB device.
"""
import logging
import time
from datetime import datetime
import platform
from typing import Optional
from packaging import version
from ...message import Message
from ...bus import BusABC, BusState
from ...util import len2dlc, dlc2len
from ...exceptions import CanError, CanOperationError, CanInitializationError
from .basic import (
PCAN_BITRATES,
PCAN_FD_PARAMETER_LIST,
PCAN_CHANNEL_NAMES,
PCAN_NONEBUS,
PCAN_BAUD_500K,
PCAN_TYPE_ISA,
PCANBasic,
PCAN_ERROR_OK,
PCAN_ALLOW_ERROR_FRAMES,
PCAN_PARAMETER_ON,
PCAN_RECEIVE_EVENT,
PCAN_API_VERSION,
PCAN_DEVICE_NUMBER,
PCAN_ERROR_QRCVEMPTY,
PCAN_ERROR_BUSLIGHT,
PCAN_ERROR_BUSHEAVY,
PCAN_MESSAGE_EXTENDED,
PCAN_MESSAGE_RTR,
PCAN_MESSAGE_FD,
PCAN_MESSAGE_BRS,
PCAN_MESSAGE_ESI,
PCAN_MESSAGE_ERRFRAME,
PCAN_MESSAGE_STANDARD,
TPCANMsgFD,
TPCANMsg,
PCAN_CHANNEL_IDENTIFYING,
PCAN_LISTEN_ONLY,
PCAN_PARAMETER_OFF,
TPCANHandle,
PCAN_PCIBUS1,
PCAN_USBBUS1,
PCAN_PCCBUS1,
PCAN_LANBUS1,
PCAN_CHANNEL_CONDITION,
PCAN_CHANNEL_AVAILABLE,
PCAN_CHANNEL_FEATURES,
FEATURE_FD_CAPABLE,
PCAN_DICT_STATUS,
)
# Set up logging
log = logging.getLogger("can.pcan")
MIN_PCAN_API_VERSION = version.parse("4.2.0")
try:
# use the "uptime" library if available
import uptime
# boottime() and fromtimestamp() both include the local timezone offset, so their difference does not.
if uptime.boottime() is None:
boottimeEpoch = 0
else:
boottimeEpoch = (uptime.boottime() - datetime.fromtimestamp(0)).total_seconds()
except ImportError as error:
log.warning(
"uptime library not available, timestamps are relative to boot time and not to Epoch UTC",
exc_info=True,
)
boottimeEpoch = 0
try:
# Try builtin Python 3 Windows API
from _overlapped import CreateEvent
from _winapi import WaitForSingleObject, WAIT_OBJECT_0, INFINITE
HAS_EVENTS = True
except ImportError:
try:
# Try pywin32 package
from win32event import CreateEvent
from win32event import WaitForSingleObject, WAIT_OBJECT_0, INFINITE
HAS_EVENTS = True
except ImportError:
# Use polling instead
HAS_EVENTS = False
class PcanBus(BusABC):
def __init__(
self,
channel="PCAN_USBBUS1",
state=BusState.ACTIVE,
bitrate=500000,
*args,
**kwargs,
):
"""A PCAN USB interface to CAN.
On top of the usual :class:`~can.Bus` methods provided,
the PCAN interface
|
includes the :meth:`~can.interface.pcan.PcanBus.flash`
and :meth:`~can.interface.pcan.PcanBus.status` methods.
:param str channel:
The can interface name. An example would be 'PCAN_USBBUS1'.
Alternatively the value can be an int with the numerical v
|
alue.
Default is 'PCAN_USBBUS1'
:param can.bus.BusState state:
BusState of the channel.
Default is ACTIVE
:param int bitrate:
Bitrate of channel in bit/s.
Default is 500 kbit/s.
Ignored if using CanFD.
:param bool fd:
Should the Bus be initialized in CAN-FD mode.
:param int f_clock:
Clock rate in Hz.
Any of the following:
20000000, 24000000, 30000000, 40000000, 60000000, 80000000.
Ignored if not using CAN-FD.
Pass either f_clock or f_clock_mhz.
:param int f_clock_mhz:
Clock rate in MHz.
Any of the following:
20, 24, 30, 40, 60, 80.
Ignored if not using CAN-FD.
Pass either f_clock or f_clock_mhz.
:param int nom_brp:
Clock prescaler for nominal time quantum.
In the range (1..1024)
Ignored if not using CAN-FD.
:param int nom_tseg1:
Time segment 1 for nominal bit rate,
that is, the number of quanta from (but not including)
the Sync Segment to the sampling point.
In the range (1..256).
Ignored if not using CAN-FD.
:param int nom_tseg2:
Time segment 2 for nominal bit rate,
that is, the number of quanta from the sampling
point to the end of the bit.
In the range (1..128).
Ignored if not using CAN-FD.
:param int nom_sjw:
Synchronization Jump Width for nominal bit rate.
Decides the maximum number of time quanta
that the controller can resynchronize every bit.
In the range (1..128).
Ignored if not using CAN-FD.
:param int data_brp:
Clock prescaler for fast data time quantum.
In the range (1..1024)
Ignored if not using CAN-FD.
:param int data_tseg1:
Time segment 1 for fast data bit rate,
that is, the number of quanta from (but not including)
the Sync Segment to the sampling point.
In the range (1..32).
Ignored if not using CAN-FD.
:param int data_tseg2:
Time segment 2 for fast data bit rate,
that is, the number of quanta from the sampling
point to the end of the bit.
In the range (1..16).
Ignored if not using CAN-FD.
:param int data_sjw:
Synchronization Jump Width for fast data bit rate.
Decides the maximum number of time quanta
that the controller can resynchronize every bit.
In the range (1..16).
Ignored if not using CAN-FD.
"""
self.channel_info = str(channel)
self.fd = kwargs.get("fd", False)
pcan_bitrate = PCAN_BITRATES.get(bitrate, PCAN_BAUD_500K)
hwtype = PCAN_TYPE_ISA
ioport = 0x02A0
interrupt = 11
if not isinstance(channel, int):
channel = PCAN_CHANNEL_NAMES[channel]
self.m_objPCANBasic = PCANBasic()
self.m_PcanHandle = channel
self.check_api_version()
if state is BusState.ACTIVE or state is BusState.PASSIVE:
self.state = state
else:
raise ValueError("BusState must be Active or Passive")
if self.fd:
f_clock_val = kwargs.get("f_clock", None)
if f_clock_val is None:
f_clock = "{}={}".format("f_clock_mhz", kwargs.get("f_clock_mhz", None))
else:
f_clock = "{}={}".format("f_clock", kwargs.get("f_clock", None))
fd_parameters_values = [f_clock] + [
"{}={}".format(key, kwargs.get(key, None))
for key in PCAN_FD_PARAMETER_LIST
if kwargs.get(key, None) is not None
]
self.fd_bitrate = " ,".join(fd_parameters_values).encode("ascii")
result = self.m_objPCANBasic.InitializeFD(
self.m_PcanHandle, self.fd_bitrate
)
else:
result = self.m_objPCANBasic.Initialize(
self.m_PcanHandle, pcan_bitrate, hwtype, ioport, interrupt
)
if result != PCAN_ERROR_OK:
raise PcanCanInitializationError(self._get_formatted_error(result))
result = self.m_objPCANBasic.SetValue(
self.m_PcanHandle, PCAN_ALLOW_ERROR_FRAMES, PCAN_PARAMETER_ON
)
if result != PCAN_ERROR_OK:
if platform.system() != "Darwin":
raise PcanCanInitializationError(self._get_formatted_error(result))
else:
# TODO Remove Filter when MACCan actually supports it:
# https://github.com/mac-can/PCBUSB-Library/
log.debug(
"Ignoring error. PCAN_ALLOW_ERROR_FRAMES is still unsupported by OSX Library PCANUSB v0.10"
)
if HAS_EVENTS:
self._recv_event = CreateEvent(None, 0, 0, None)
result = self.m_objPCANBasic.SetValue(
self.m_PcanHandle, PCAN_RECEIVE_EVENT, self._recv_event
)
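# Illustrative sketch (not from the library docs): constructing a CAN FD bus
# with the keyword arguments described in the docstring above. The concrete
# timing values are assumptions chosen only to show the parameter names.
#
# bus = PcanBus(
#     channel="PCAN_USBBUS1",
#     fd=True,
#     f_clock_mhz=80,
#     nom_brp=10, nom_tseg1=12, nom_tseg2=3, nom_sjw=1,
#     data_brp=4, data_tseg1=7, data_tseg2=2, data_sjw=1,
# )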
|
Morbotic/pronto-distro
|
externals/libbot-drc/bot2-procman/python/src/bot_procman/printf_request_t.py
|
Python
|
lgpl-2.1
| 1,785
| 0.006162
|
"""LCM type definitions
This file automatically generated by lcm.
DO NOT MODIFY BY HAND!!!!
"""
try:
import cStringIO.StringIO as BytesIO
except ImportError:
from io import BytesIO
import struct
class printf_request_t(object):
__slots__ = ["sheriff_id"]
def __init__(self):
self.sheriff_id = 0
def encode(
|
self):
buf = BytesIO()
buf.write(printf_request_t._get_packed_fingerprint())
self._encode_one(buf)
return buf.getvalue()
def _encode_one(self, buf):
buf.write(struct.pack(">i", self.sheriff_id))
def decode(data):
if hasattr(data, 'read'):
buf = data
else:
buf = BytesIO(data)
if buf.read(8) != printf_request_t._get_packed_fingerprint():
raise ValueError("Decode error")
return p
|
rintf_request_t._decode_one(buf)
decode = staticmethod(decode)
def _decode_one(buf):
self = printf_request_t()
self.sheriff_id = struct.unpack(">i", buf.read(4))[0]
return self
_decode_one = staticmethod(_decode_one)
_hash = None
def _get_hash_recursive(parents):
if printf_request_t in parents: return 0
tmphash = (0x96c8b21fe50022a4) & 0xffffffffffffffff
tmphash = (((tmphash<<1)&0xffffffffffffffff) + (tmphash>>63)) & 0xffffffffffffffff
return tmphash
_get_hash_recursive = staticmethod(_get_hash_recursive)
_packed_fingerprint = None
def _get_packed_fingerprint():
if printf_request_t._packed_fingerprint is None:
printf_request_t._packed_fingerprint = struct.pack(">Q", printf_request_t._get_hash_recursive([]))
return printf_request_t._packed_fingerprint
_get_packed_fingerprint = staticmethod(_get_packed_fingerprint)
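# Hedged usage sketch: encode/decode round trip for the generated type above.
if __name__ == "__main__":
    msg = printf_request_t()
    msg.sheriff_id = 42
    data = msg.encode()                       # 8-byte fingerprint + ">i" payload
    decoded = printf_request_t.decode(data)
    assert decoded.sheriff_id == 42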
|
edgedb/edgedb
|
edb/server/compiler/__init__.py
|
Python
|
apache-2.0
| 1,345
| 0
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2018-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from .compiler import Compiler
from .compiler import CompileContext, CompilerDatabaseState
from .compiler import compile_edgeql_script
from .compiler import load_std_schema
from .compiler import new_compiler, new_compiler_context
from .dbstate import QueryUnit
from .enums import Capability, CompileStatementMode, Cardinality
from .enums import IoFormat
__all__ = (
'Cardinality',
'Compiler',
'CompileContext',
'CompilerDatabaseState',
'QueryUnit',
'Capability', 'CompileStatementMode', 'IoFormat',
'compile_edgeql_script',
'load_std_sche
|
ma'
|
,
'new_compiler',
'new_compiler_context',
)
|
codilime/cloudify-diamond-plugin
|
diamond_agent/tests/test_single_node.py
|
Python
|
apache-2.0
| 8,798
| 0.000114
|
import os
import time
import json
import cPickle
import tempfile
from testtools import TestCase, ExpectedException
import psutil
from cloudify.workflows import local
class TestSingleNode(TestCase):
def setUp(self):
super(TestSingleNode, self).setUp()
os.environ['MANAGEMENT_IP'] = '127.0.0.1'
self.is_uninstallable = True
self.env = None
def tearDown(self):
super(TestSingleNode, self).tearDown()
if self.env and self.is_uninstallable:
self.env.execute('uninstall', task_retries=0)
# custom handler + custom collector
def test_custom_collectors(self):
log_path = tempfile.mktemp()
inputs = {
'diamond_config': {
'prefix': tempfile.mkdtemp(prefix='cloudify-'),
'interval': 1,
'handlers': {
'test_handler.TestHandler': {
'path': 'handlers/test_handler.py',
'config': {
'log_path': log_path,
}
}
}
},
'collectors_config': {
|
'TestCollector': {
'path': 'collectors/test.py',
'config': {
'name': 'metric',
'value': 42,
},
},
},
}
self.env = self._create_env(inputs)
self.env.execute('install', task_retries=0)
if not is_created(log_path):
self.fail('file {0} expected, but not found!'.format(log_path))
with open(log_path, 'r') as fh:
m
|
etric = cPickle.load(fh)
metric_path = metric.path.split('.')
collector_config = \
inputs['collectors_config']['TestCollector']['config']
self.assertEqual(collector_config['name'], metric_path[5])
self.assertEqual(collector_config['value'], metric.value)
self.assertEqual(self.env.name, metric_path[0])
self.assertEqual('TestCollector', metric_path[4])
node_instances = self.env.storage.get_node_instances()
host_instance_id, node_id, node_instance_id = get_ids(node_instances,
'node')
self.assertEqual(host_instance_id, metric_path[1])
self.assertEqual(node_id, metric_path[2])
self.assertEqual(node_instance_id, metric_path[3])
def test_cloudify_handler_format(self):
log_path = tempfile.mktemp()
inputs = {
'diamond_config': {
'prefix': tempfile.mkdtemp(prefix='cloudify-'),
'interval': 1,
'handlers': {
'test_handler.TestHandler': {
'path': 'handlers/test_handler.py',
'config': {
'log_path': log_path,
'output_cloudify_format': True,
}
}
}
},
'collectors_config': {
'TestCollector': {
'path': 'collectors/test.py',
'config': {
'name': 'metric',
'value': 42,
},
},
},
}
self.env = self._create_env(inputs)
self.env.execute('install', task_retries=0)
if not is_created(log_path):
self.fail('file {0} expected, but not found!'.format(log_path))
with open(log_path, 'r') as fh:
metric = json.loads(cPickle.load(fh))
collector_config = \
inputs['collectors_config']['TestCollector']['config']
node_instances = self.env.storage.get_node_instances()
expected_host, expected_node_name, expected_node_id = get_ids(
node_instances, 'node')
expected_path = collector_config['name']
expected_metric = collector_config['value']
expected_deployment_id = self.env.name
expected_name = 'TestCollector'
expected_unit = ''
expected_type = 'GAUGE'
expected_service = '.'.join([
expected_deployment_id,
expected_node_name,
expected_node_id,
expected_name,
expected_path
])
self.assertEqual(expected_path, metric['path'])
self.assertEqual(expected_metric, metric['metric'])
self.assertEqual(expected_deployment_id, metric['deployment_id'])
self.assertEqual(expected_name, metric['name'])
self.assertEqual(expected_unit, metric['unit'])
self.assertEqual(expected_type, metric['type'])
self.assertEqual(expected_host, metric['host'])
self.assertEqual(expected_node_name, metric['node_name'])
self.assertEqual(expected_node_id, metric['node_id'])
self.assertEqual(expected_service, metric['service'])
self.assertTrue(time.time() - 120 <= metric['time'] <= time.time())
# custom handler + no collector
# diamond should run without outputting anything
def test_no_collectors(self):
log_path = tempfile.mktemp()
inputs = {
'diamond_config': {
'prefix': tempfile.mkdtemp(prefix='cloudify-'),
'interval': 1,
'handlers': {
'test_handler.TestHandler': {
'path': 'handlers/test_handler.py',
'config': {
'log_path': log_path,
},
}
}
},
'collectors_config': {}
}
self.env = self._create_env(inputs)
self.env.execute('install', task_retries=0)
pid = get_pid(inputs)
if not psutil.pid_exists(pid):
self.fail('Diamond failed to start with empty collector list')
def test_uninstall_workflow(self):
inputs = {
'diamond_config': {
'prefix': tempfile.mkdtemp(prefix='cloudify-'),
'interval': 1,
'handlers': {
'diamond.handler.archive.ArchiveHandler': {
'config': {
'log_file': tempfile.mktemp(),
}
}
}
},
'collectors_config': {},
}
self.is_uninstallable = False
self.env = self._create_env(inputs)
self.env.execute('install', task_retries=0)
pid_file = os.path.join(inputs['diamond_config']['prefix'],
'var', 'run', 'diamond.pid')
with open(pid_file, 'r') as pf:
pid = int(pf.read())
if psutil.pid_exists(pid):
self.env.execute('uninstall', task_retries=0)
time.sleep(5)
else:
self.fail('diamond process not running')
self.assertFalse(psutil.pid_exists(pid))
def test_no_handlers(self):
inputs = {
'diamond_config': {
'handlers': {},
},
'collectors_config': {},
}
self.is_uninstallable = False
self.env = self._create_env(inputs)
with ExpectedException(RuntimeError, ".*Empty handlers dict"):
self.env.execute('install', task_retries=0)
def _create_env(self, inputs):
return local.init_env(self._blueprint_path(),
inputs=inputs,
ignored_modules=['worker_installer.tasks',
'plugin_installer.tasks'])
def _blueprint_path(self):
return self._get_resource_path('blueprint', 'single_node.yaml')
def _get_resource_path(self, *args):
return os.path.join(os.path.dirname(__file__), 'resources', *args)
def collector_in_log(path, collector):
with open(path, 'r') as fh:
try:
while True:
metric = cPickle.load(fh)
if metric.path.split('.')[3] == collector:
return
|
CDNoyes/EDL-Py
|
Utils/progress.py
|
Python
|
gpl-3.0
| 521
| 0.011516
|
def progress(current, total, percent=10, iteration=None):
"""
|
Used in a loop to indicate progress
"""
current += 1
if current:
previous = current - 1
else:
previous = current
# print out every percent
frac = percent/100.
value = max(1, frac*total)
return not (int(current/value) == int(previous/value))
if __name__ == "__main__":
for i in range(17):
print(i)
if progress(i, 17):
|
print(r"Another 10% completed")
|
polyaxon/polyaxon
|
core/polyaxon/polypod/compiler/lineage/artifacts_collector.py
|
Python
|
apache-2.0
| 1,081
| 0.000925
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
|
You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the L
|
icense for the specific language governing permissions and
# limitations under the License.
import os
from typing import Optional
from polyaxon.polyboard.artifacts import V1ArtifactKind, V1RunArtifact
from polyaxon.utils.fqn_utils import to_fqn_name
def collect_lineage_artifacts_path(artifact_path: str) -> Optional[V1RunArtifact]:
name = os.path.basename(artifact_path)
return V1RunArtifact(
name=to_fqn_name(name),
kind=V1ArtifactKind.DIR,
path=artifact_path,
summary={"path": artifact_path},
is_input=True,
)
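# Hedged usage sketch (the artifact path below is a made-up example):
if __name__ == "__main__":
    artifact = collect_lineage_artifacts_path("/plx-context/artifacts/outputs")
    print(artifact)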
|
pcbje/gransk
|
gransk/plugins/storage/tests/store_text_test.py
|
Python
|
apache-2.0
| 1,005
| 0.002985
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import os
import unittest
import shutil
import gransk.core.helper as helper
import gransk.cor
|
e.tests.test_helper as test_helper
import gransk.core.document as document
import gransk.plugins.storage.store_text as store_text
class StoreTextTest(unittest.TestCase):
def test_simple(self):
mock_pipeline = test_helper.get_mock_pipeline([])
data_root = os.path.join('local_data', 'unittests')
if os.path.exists(data_root):
shutil.rmtree(data_root)
_store_text = store_text.Subscriber(mock_pipeline)
_store_text.setup({
helpe
|
r.DATA_ROOT: data_root,
'workers': 1
})
doc = document.get_document('mock')
doc.text = 'mock-mock-mock'
_store_text.consume(doc, None)
expected = 'local_data/unittests/text/17404a59-mock'
actual = doc.meta['text_file']
self.assertEquals(expected, actual)
if __name__ == '__main__':
unittest.main()
|
aytuncbeken/Hp-Alm-Purge-Tool
|
PurgeWizard.py
|
Python
|
gpl-3.0
| 8,814
| 0.003744
|
#!/usr/bin/env python3
"""
This project is a Python-based HP ALM Purge Wizard.
This is the main file, which does all the work.
For detailed information please visit
https://github.com/aytuncbeken/Hp-Alm-Purge-Tool
Author:Aytunc BEKEN
Python Version:3.6
License:GPL
"""
import threading
from concurrent.futures import ThreadPoolExecutor
import AlmConnector
import json
import time
import logging
import configparser
from datetime import datetime
# Global variable for Thread Access
delete_success = 0
delete_fail = 0
delete_total = 0
delete_wait_counter = 0
lock = threading.Lock()
def main():
global delete_total
global delete_success
global delete_fail
global delete_wait_counter
global lock
threadPool = ThreadPoolExecutor(max_workers=10)
start_time = time.time()
config = configparser.ConfigParser()
config.read("PurgeWizard.ini")
alm_host = config["PurgeWizard"]["AlmHost"]
alm_port = config["PurgeWizard"]["AlmPort"]
alm_username = config["PurgeWizard"]["AlmUserName"]
alm_password = config["PurgeWizard"]["AlmPassword"]
alm_domain = config["PurgeWizard"]["AlmDomain"]
alm_project = config["PurgeWizard"]["AlmProject"]
limit_per_page = config["PurgeWizard"]["RecordLimitPerPage"]
date_limit = config["PurgeWizard"]["DeleteOlderThan"]
simulate_delete = config["PurgeWizard"]["SimulateDelete"]
log_file = config["PurgeWizard"]["LogFileWithFullPath"]
delete_with_thread = config["PurgeWizard"]["DeleteWithThread"]
logging.basicConfig(format='[%(asctime)s][%(levelname)s:] %(message)s', filename=log_file, filemode='w',
level=logging.INFO)
logging.info("Starting Alm Purge Wizard with Parameters")
logging.info("AlmHost:%s",alm_host)
logging.info("A
|
lmPort:%s", alm_port)
logging.info("AlmUserName:%s", alm_username)
logging.info("AlmPassword:%s",
|
alm_password)
logging.info("AlmDomain:%s", alm_domain)
logging.info("AlmProject:%s", alm_project)
logging.info("RecordLimitPerPage:%s", limit_per_page)
logging.info("DeleteOlderThan:%s", date_limit)
logging.info("SimulateDelete:%s", simulate_delete)
logging.info("LogFileWithFullPath:%s", log_file)
if not AlmConnector.connect_alm(alm_host, alm_port, alm_username, alm_password,alm_domain,alm_project):
logging.info("Alm Connection failed")
return
if not AlmConnector.check_authentication():
logging.info("Alm Auth Check Failed")
return
if not AlmConnector.get_session():
logging.info("Alm Session Init Failed")
return
logging.info("Alm Connection/Authentication Succeeded")
temp_response = AlmConnector.get_testset_list_with_only_id(None, None)
if temp_response is None:
logging.info("Test Set List Returned None - Exit")
return
test_list = json.loads(temp_response)
test_count = test_list["TotalResults"]
limit = limit_per_page
offset = 0
offset_step = int(limit)
limit_date = datetime.strptime(date_limit,"%Y-%m-%d")
logging.info("Total Test Set Entity Count: %s", test_count)
logging.info("Iterate Over Test Sets - Limit:%s", limit)
logging.info("Date Limit:%s", limit_date)
while True:
logging.info("Iterate Offset:%s" , offset)
temp_response = AlmConnector.get_testset_list_with_only_id(limit, offset)
if temp_response is None:
logging.info("Test Set List Returned None - Exit")
logging.info("Temp Response:%s", temp_response)
return
testset_list = json.loads(temp_response)
testset_count = testset_list["TotalResults"]
if testset_count == 0:
logging.info("Test Set List Returned Zero Records - End Of Loop")
logging.info("Test Set List:%s", testset_list)
break
logging.info("Number Of Records to Process:%s", len(testset_list["entities"]))
for testset in testset_list["entities"]:
testset_id = testset["Fields"][0]["values"][0]["value"]
testset_name = testset["Fields"][1]["values"][0]["value"]
logging.info("")
logging.info("Processing Test Set Id:%s Test Set Name:%s", testset_id, testset_name)
if testset_id is None:
logging.info("Test Set Id is None - Pass Test")
logging.info("Test Set:%s", testset)
logging.info("Test Set Id:%s", testset_id)
continue
temp_response = AlmConnector.get_testcycl_list_by_test_set(testset_id)
if temp_response is None:
logging.info("Test Set Cycle List From Test Set Id Query Returned None - Exit")
logging.info("Temp Response:%s" , temp_response)
break
cycle_json = json.loads(temp_response)
cycle_list = {}
for cycle in cycle_json["entities"]:
cycle_id = cycle["Fields"][2]["values"][0]["value"]
cycle_name = cycle["Fields"][1]["values"][0]["value"]
cycle_list[cycle_id] = cycle_name
logging.info("Test Set Cycle List Extracted From Test Set:%s", testset_id)
logging.info("Test Cycle List:%s",cycle_list)
logging.info("Number Of Test Cycles:%s", len(cycle_list))
for cycle in cycle_list:
logging.info("Test Cycle:%s - %s", cycle, cycle_list[cycle])
temp_response = AlmConnector.get_run_by_testcycl(cycle)
run_list = json.loads(temp_response)
run_total_results = run_list["TotalResults"]
logging.info("Number Of Run:%s",run_total_results)
if run_total_results == 1:
logging.info("Test Cycle Have 1 Run - Keeping - Pass Test")
continue
if run_total_results == 0:
logging.info("Test Cycle Have No Run - Pass Test")
continue
for run in run_list["entities"]:
run_id = run["Fields"][0]["values"][0]["value"]
execution = run["Fields"][1]["values"][0]["value"]
execution_date = datetime.strptime(execution, "%Y-%m-%d")
date_diff = execution_date - limit_date
logging.info("Processing Run Id:%s Execution Date:%s Date Diff:%s" ,run_id, execution_date, date_diff )
if int(date_diff.days) >= 0:
logging.info("Execution Date is not under limit - Pass Test")
continue
if run_id is None:
logging.info("Run Id is None - Pass Run")
logging.info("Run:%s",run)
logging.info("Run Id:%s",run_id)
continue
if run_list["entities"].index(run) >= len(run_list["entities"]) -1:
logging.info("Leaving Most Recent Test - Pass Test")
continue
delete_total = delete_total + 1
if simulate_delete == "False":
if delete_with_thread == "True":
logging.info("Delete Run:%s With Thread",run_id)
lock.acquire()
delete_wait_counter = delete_wait_counter + 1
lock.release()
threadPool.submit(delete_run,run_id)
else:
logging.info("Delete Run:%s Without Thread", run_id)
delete_run(run_id)
else:
logging.info("(SimulateDelete) Pass Delete Run::%s ", run_id)
offset = offset + offset_step
elapsed_time = time.time() - start_time
threadPool.shutdown(wait=True)
logging.info("Purge Ended")
logging.info("Elapsed Time:%s",elapsed_time)
logging.info("Total Run Marked For Delete:%s", delete_total)
logging.info("Total Run Delete Success:%s", delete_success)
logging.info("Total Run Delete Failed:%s",delete_fail)
def delete_run(run_id):
|
lesglaneurs/lesglaneurs
|
presentation/migrations/0016_auto_20160516_0806.py
|
Python
|
gpl-3.0
| 398
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from djang
|
o.db import migrations, models
class Migration(migrati
|
ons.Migration):
dependencies = [
('presentation', '0015_auto_20160515_1658'),
]
operations = [
migrations.RenameField(
model_name='membership',
old_name='membership',
new_name='role',
),
]
|
mistercrunch/panoramix
|
superset/views/base_api.py
|
Python
|
apache-2.0
| 21,953
| 0.000957
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or im
|
plied. See the License for the
# specific language governing permissions and limitations
# under the License.
import functools
import l
|
ogging
from typing import Any, Callable, cast, Dict, List, Optional, Set, Tuple, Type, Union
from apispec import APISpec
from apispec.exceptions import DuplicateComponentNameError
from flask import Blueprint, g, Response
from flask_appbuilder import AppBuilder, Model, ModelRestApi
from flask_appbuilder.api import expose, protect, rison, safe
from flask_appbuilder.models.filters import BaseFilter, Filters
from flask_appbuilder.models.sqla.filters import FilterStartsWith
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_babel import lazy_gettext as _
from marshmallow import fields, Schema
from sqlalchemy import and_, distinct, func
from sqlalchemy.orm.query import Query
from superset.extensions import db, event_logger, security_manager
from superset.models.core import FavStar
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
from superset.schemas import error_payload_content
from superset.sql_lab import Query as SqllabQuery
from superset.stats_logger import BaseStatsLogger
from superset.typing import FlaskResponse
from superset.utils.core import time_function
logger = logging.getLogger(__name__)
get_related_schema = {
"type": "object",
"properties": {
"page_size": {"type": "integer"},
"page": {"type": "integer"},
"include_ids": {"type": "array", "items": {"type": "integer"}},
"filter": {"type": "string"},
},
}
class RelatedResultResponseSchema(Schema):
value = fields.Integer(description="The related item identifier")
text = fields.String(description="The related item string representation")
class RelatedResponseSchema(Schema):
count = fields.Integer(description="The total number of related values")
result = fields.List(fields.Nested(RelatedResultResponseSchema))
class DistinctResultResponseSchema(Schema):
text = fields.String(description="The distinct item")
class DistincResponseSchema(Schema):
count = fields.Integer(description="The total number of distinct values")
result = fields.List(fields.Nested(DistinctResultResponseSchema))
def statsd_metrics(f: Callable[..., Any]) -> Callable[..., Any]:
"""
Handle sending all statsd metrics from the REST API
"""
def wraps(self: "BaseSupersetModelRestApi", *args: Any, **kwargs: Any) -> Response:
try:
duration, response = time_function(f, self, *args, **kwargs)
except Exception as ex:
self.incr_stats("error", f.__name__)
raise ex
self.send_stats_metrics(response, f.__name__, duration)
return response
return functools.update_wrapper(wraps, f)
class RelatedFieldFilter:
# data class to specify what filter to use on a /related endpoint
# pylint: disable=too-few-public-methods
def __init__(self, field_name: str, filter_class: Type[BaseFilter]):
self.field_name = field_name
self.filter_class = filter_class
class BaseFavoriteFilter(BaseFilter): # pylint: disable=too-few-public-methods
"""
Base Custom filter for the GET list that filters all dashboards, slices
that a user has favored or not
"""
name = _("Is favorite")
arg_name = ""
class_name = ""
""" The FavStar class_name to user """
model: Type[Union[Dashboard, Slice, SqllabQuery]] = Dashboard
""" The SQLAlchemy model """
def apply(self, query: Query, value: Any) -> Query:
# If anonymous user filter nothing
if security_manager.current_user is None:
return query
users_favorite_query = db.session.query(FavStar.obj_id).filter(
and_(
FavStar.user_id == g.user.get_id(),
FavStar.class_name == self.class_name,
)
)
if value:
return query.filter(and_(self.model.id.in_(users_favorite_query)))
return query.filter(and_(~self.model.id.in_(users_favorite_query)))
class BaseSupersetModelRestApi(ModelRestApi):
"""
Extends FAB's ModelResApi to implement specific superset generic functionality
"""
csrf_exempt = False
method_permission_name = {
"bulk_delete": "delete",
"data": "list",
"data_from_cache": "list",
"delete": "delete",
"distinct": "list",
"export": "mulexport",
"import_": "add",
"get": "show",
"get_list": "list",
"info": "list",
"post": "add",
"put": "edit",
"refresh": "edit",
"related": "list",
"related_objects": "list",
"schemas": "list",
"select_star": "list",
"table_metadata": "list",
"test_connection": "post",
"thumbnail": "list",
"viz_types": "list",
}
order_rel_fields: Dict[str, Tuple[str, str]] = {}
"""
Impose ordering on related fields query::
order_rel_fields = {
"<RELATED_FIELD>": ("<RELATED_FIELD_FIELD>", "<asc|desc>"),
...
}
""" # pylint: disable=pointless-string-statement
related_field_filters: Dict[str, Union[RelatedFieldFilter, str]] = {}
"""
Declare the filters for related fields::
related_fields = {
"<RELATED_FIELD>": <RelatedFieldFilter>)
}
""" # pylint: disable=pointless-string-statement
filter_rel_fields: Dict[str, BaseFilter] = {}
"""
Declare the related field base filter::
filter_rel_fields_field = {
"<RELATED_FIELD>": "<FILTER>")
}
""" # pylint: disable=pointless-string-statement
allowed_rel_fields: Set[str] = set()
"""
Declare a set of allowed related fields that the `related` endpoint supports
""" # pylint: disable=pointless-string-statement
text_field_rel_fields: Dict[str, str] = {}
"""
Declare an alternative for the human readable representation of the Model object::
text_field_rel_fields = {
"<RELATED_FIELD>": "<RELATED_OBJECT_FIELD>"
}
""" # pylint: disable=pointless-string-statement
allowed_distinct_fields: Set[str] = set()
openapi_spec_component_schemas: Tuple[Type[Schema], ...] = tuple()
"""
Add extra schemas to the OpenAPI component schemas section
""" # pylint: disable=pointless-string-statement
add_columns: List[str]
edit_columns: List[str]
list_columns: List[str]
show_columns: List[str]
responses = {
"400": {"description": "Bad request", "content": error_payload_content},
"401": {"description": "Unauthorized", "content": error_payload_content},
"403": {"description": "Forbidden", "content": error_payload_content},
"404": {"description": "Not found", "content": error_payload_content},
"422": {
"description": "Could not process entity",
"content": error_payload_content,
},
"500": {"description": "Fatal error", "content": error_payload_content},
}
def __init__(self) -> None:
# Setup statsd
self.stats_logger = BaseStatsLogger()
# Add base API spec base query parameter schemas
if self.apispec_parameter_schemas is None: # type: ignore
self.apispec_parameter_schemas = {}
self.apispec_parameter_schemas["get_related_schema"] = get_related_schema
if self.openapi_spec_component_schemas is None:
|
midonet/python-neutron-plugin-midonet
|
midonet/neutron/tests/unit/test_midonet_plugin.py
|
Python
|
apache-2.0
| 4,032
| 0
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Midokura Japan K.K.
# Copyright (C) 2013 Midokura PTE LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os
from oslo_utils import importutils
from midonet.neutron.db import task # noqa
from neutron.extensions import portbindings
from neutron.tests.unit import _test_extension_portbindings as test_bindings
import neutron.tests.unit.test_db_plugin as test_plugin
import neutron.tests.unit.test_extension_ext_gw_mode as test_gw_mode
import neutron.tests.unit.test_extension_security_group as sg
import neutron.tests.unit.test_l3_plugin as test_l3_plugin
from oslo_config import cfg
MIDOKURA_PKG_PATH = "midonet.neutron.plugin"
MIDOKURA_EXT_PATH = "midonet.neutron.extensions"
MIDONET_PLUGIN_NAME = ('%s.MidonetPluginV2' % MIDOKURA_PKG_PATH)
class MidonetPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
def setUp(self,
plugin=MIDONET_PLUGIN_NAME,
ext_mgr=None,
service_plugins=None):
self.midoclient_mock = mock.MagicMock()
self.midoclient_mock.midonetclient.neutron.client.return_value = True
modules = {
'midonetclient': self.midoclient_mock,
'midonetclient.neutron': self.midoclient_mock.neutron,
'midonetclient.neutron.client': self.midoclient_mock.client,
}
self.module_patcher = mock.patch.dict('sys.modules', modules)
self.module_patcher.start()
# import midonetclient here because it needs proper mock objects to be
# assigned to this module first. 'midoclient_mock' object is the
# mock object used for this module.
from midonetclient.neutron.client import MidonetClient
client_class = MidonetClient
self.mock_class = client_class()
extensions_path = importutils.import_module(
MIDOKURA_EXT_PATH).__file__
cfg.CONF.set_override('api_extensions_path',
os.path.dirname(extensions_path))
        super(MidonetPluginV2TestCase, self).setUp(plugin=plugin)
def tearDown(self):
super(MidonetPluginV2TestCase, self).tearDown()
self.module_patcher.stop()
class TestMidonetNetworksV2(MidonetPluginV2TestCase,
test_plugin.TestNetworksV2):
pass
class TestMidonetL3NatTestCase(MidonetPluginV2TestCase,
test_l3_plugin.L3NatDBIntTestCase):
def test_floatingip_with_invalid_create_port(self):
self._test_floatingip_with_invalid_create_port(MIDONET_PLUGIN_NAME)
class TestMidonetSecurityGroup(MidonetPluginV2TestCase,
sg.TestSecurityGroups):
pass
class TestMidonetSubnetsV2(MidonetPluginV2TestCase,
test_plugin.TestSubnetsV2):
pass
class TestMidonetPortsV2(MidonetPluginV2TestCase,
test_plugin.TestPortsV2):
def test_vif_port_binding(self):
with self.port(name='myname') as port:
self.assertEqual('midonet', port['port']['binding:vif_type'])
self.assertTrue(port['port']['admin_state_up'])
class TestMidonetPluginPortBinding(MidonetPluginV2TestCase,
test_bindings.PortBindingsTestCase):
VIF_TYPE = portbindings.VIF_TYPE_MIDONET
HAS_PORT_FILTER = True
class TestExtGwMode(MidonetPluginV2TestCase,
test_gw_mode.ExtGwModeIntTestCase):
pass
|
callorico/django-rest-framework
|
tests/test_views.py
|
Python
|
bsd-2-clause
| 3,650
| 0
|
from __future__ import unicode_literals
import copy
import sys
from django.test import TestCase
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.test import APIRequestFactory
from rest_framework.views import APIView
factory = APIRequestFactory()
if sys.version_info[:2] >= (3, 4):
JSON_ERROR = 'JSON parse error - Expecting value:'
else:
JSON_ERROR = 'JSON parse error - No JSON object could be decoded'
class BasicView(APIView):
def get(self, request, *args, **kwargs):
return Response({'method': 'GET'})
def post(self, request, *args, **kwargs):
return Response({'method': 'POST', 'data': request.data})
@api_view(['GET', 'POST', 'PUT', 'PATCH'])
def basic_view(request):
if request.method == 'GET':
return {'method': 'GET'}
elif request.method == 'POST':
return {'method': 'POST', 'data': request.data}
elif request.method == 'PUT':
return {'method': 'PUT', 'data': request.data}
elif request.method == 'PATCH':
return {'method': 'PATCH', 'data': request.data}
class ErrorView(APIView):
def get(self, request, *args, **kwargs):
raise Exception
@api_view(['GET'])
def error_view(request):
raise Exception
def sanitise_json_error(error_dict):
"""
Exact contents of JSON error messages depend on the installed version
of json.
"""
ret = copy.copy(error_dict)
chop = len(JSON_ERROR)
ret['detail'] = ret['detail'][:chop]
return ret
class ClassBasedViewIntegrationTests(TestCase):
def setUp(self):
self.view = BasicView.as_view()
def test_400_parse_error(self):
request = factory.post('/', 'f00bar', content_type='application/json')
response = self.view(request)
expected = {
'detail': JSON_ERROR
}
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(sanitise_json_error(response.data), expected)
class FunctionBasedViewIntegrationTests(TestCase):
def setUp(self):
self.view = basic_view
def test_400_parse_error(self):
request = factory.post('/', 'f00bar', content_type='application/json')
response = self.view(request)
expected = {
'detail': JSON_ERROR
}
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(sanitise_json_error(response.data), expected)
class TestCustomExceptionHandler(TestCase):
def setUp(self):
self.DEFAULT_HANDLER = api_settings.EXCEPTION_HANDLER
def exception_handler(exc, request):
return Response('Error!', status=status.HTTP_400_BAD_REQUEST)
api_settings.EXCEPTION_HANDLER = exception_handler
def tearDown(self):
api_settings.EXCEPTION_HANDLER = self.DEFAULT_HANDLER
def test_class_based_view_exception_handler(self):
view = ErrorView.as_view()
request = factory.get('/', content_type='application/json')
response = view(request)
expected = 'Error!'
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, expected)
def test_function_based_view_exception_handler(self):
view = error_view
request = factory.get('/', content_type='application/json')
response = view(request)
expected = 'Error!'
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, expected)
|
kamal-gade/rockstor-core
|
src/rockstor/storageadmin/views/plugin.py
|
Python
|
gpl-3.0
| 1,928
| 0.004668
|
"""
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
import rest_framework_custom as rfc
from storageadmin.util import handle_exception
from storageadmin.models import (Plugin, InstalledPlugin)
from storageadmin.serializers import PluginSerializer
import time
import logging
logger = logging.getLogger(__name__)
class PluginView(rfc.GenericView):
serializer_class = PluginSerializer
def get_queryset(self, *args, **kwargs):
return Plugin.objects.all()
#if 'available_plugins' in request.session:
# if request.session['available_plugins'] == None:
# request.session['available_plugins'] = ['backup']
#else:
# request.session['available_plugins'] = ['backup']
#if 'installed_plugins' in request.session:
# if request.session['installed_plugins'] == None:
# request.session['installed_plugins'] = []
#else:
# request.session['installed_plugins'] = []
#data = {
# 'installed': request.session['installed_plugins'],
# 'available': request.session['available_plugins']
# }
#return Response(data)
|
martenson/ansible-common-roles
|
paths/library/zfs_permissions.py
|
Python
|
mit
| 9,934
| 0.004832
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Nate Coraor <nate@coraor.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: zfs_permissions
short_description: Manage zfs administrative permissions
description:
- Manages ZFS file system administrative permissions on Solaris and FreeBSD. See zfs(1M) for more information about the properties.
version_added: "1.10"
options:
name:
description:
- File system or volume name e.g. C(rpool/myfs)
required: true
state:
description:
- Whether to allow (C(present)), or unallow (C(absent)) a permission.
required: true
choices: [present, absent]
users:
description:
- Users to whom permission(s) should be granted, separated by commas.
required: false
groups:
description:
- Groups to whom permission(s) should be granted, separated by commas.
required: false
everyone:
description:
- Apply permissions to everyone.
required: false
default: false
choices: ['on','off']
permissions:
description:
- The permission(s) to delegate, separated by commas (required if C(state) is C(present))
required: false
choices: ['allow','clone','create','destroy',...]
local:
description:
- Apply permissions to C(name) "locally" (C(zfs allow -l))
required: false
default: null
choices: ['on','off']
descendents:
description:
- Apply permissions to C(name)'s descendents (C(zfs allow -d))
required: false
default: null
choices: ['on','off']
recursive:
description:
- Unallow permissions recursively (ignored when C(state) is C(present))
required: false
default: false
choices: ['on','off']
author: "Nate Coraor (@natefoo)"
'''
EXAMPLES = '''
# Grant `zfs allow` and `unallow` permission to the `adm` user with local+descendents scope
- zfs_permissions: name=rpool/myfs users=adm permissions=allow,unallow
# Grant `zfs send` to everyone, plus the group `backup`
- zfs_permissions: name=rpool/myvol groups=backup everyone=yes permissions=send
# Grant `zfs send,receive` to users `foo` and `bar` with local scope only
- zfs_permissions: name=rpool/myfs users=foo,bar permissions=send,receive local=yes
# Revoke all permissions from everyone (permissions specifically assigned to users and groups remain)
- zfs_permissions: name=rpool/myfs state=absent everyone=yes
'''
import sys
class ZfsPermissions(object):
def __init__(self, module):
self.module = module
self.name = module.params.get('name')
self.state = module.params.get('state')
self.users = module.params.get('users') or []
self.groups = module.params.get('groups') or []
self.everyone = module.boolean(module.params.get('everyone'))
self.perms = module.params.get('permissions') or []
self.recursive = module.boolean(module.params.get('recursive'))
self.scope = None
self.changed = False
self.__current_perms = None
if self.state == 'present' and not self.perms:
self.module.fail_json(msg='The `permissions` option is required for state=present')
if self.state == 'present' and not (self.users or self.groups or self.everyone):
self.module.fail_json(msg='One of `users`, `groups`, or `everyone` must be set')
for splittable in ('users', 'groups', 'perms'):
if getattr(self, splittable):
setattr(self, splittable, getattr(self, splittable).split(','))
local = module.boolean(module.params.get('local'))
descendents = module.boolean(module.params.get('descendents'))
if (local and descendents) or (not local and not descendents):
self.scope = 'ld'
elif local:
self.scope = 'l'
elif descendents:
self.scope = 'd'
else:
self.module.fail_json(msg='Impossible value for local and descendents')
self.subcommand = 'allow'
self.recursive_opt = []
if self.state == 'absent':
self.subcommand = 'unallow'
if self.recursive:
self.recursive_opt = ['-r']
else:
self.recursive_opt = []
self.run()
@property
def current_perms(self):
if self.__current_perms is None:
rc, out, err = self.run_command(['zfs', 'allow', self.name])
if rc:
self.module.fail_json(msg='Getting permissions for %s failed: %s' % (self.name, err))
perms = dict(l = dict(u=dict(), g=dict(), e=[]),
d = dict(u=dict(), g=dict(), e=[]),
ld = dict(u=dict(), g=dict(), e=[]))
reading = None
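            # The loop below parses `zfs allow <name>` output; judging from the string
            # checks that follow, the expected shape is roughly (an assumption based on
            # the parsing code, not on documentation shipped with this file):
            #   Local+Descendent permissions:
            #           user adm allow,unallow
            #           group backup send
            #           everyone send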
for line in out.splitlines():
if line == 'Local permissions:':
reading = 'l'
elif line == 'Descendent permissions:':
reading = 'd'
elif line == 'Local+Descendent permissions:':
reading = 'ld'
                elif line.startswith('\tuser '):
                    user, cur_perms = line.split()[1:3]
                    perms[reading]['u'][user] = cur_perms.split(',')
elif line.startswith('\tgroup '):
group, cur_perms = line.split()[1:3]
perms[reading]['g'][group] = cur_perms.split(',')
elif line.startswith('\teveryone '):
perms[reading]['e'] = line.split()[1].split(',')
self.__current_perms = perms
return self.__current_perms
def run_command(self, cmd):
progname = cmd[0]
cmd[0] = self.module.get_bin_path(progname, True)
return self.module.run_command(cmd)
def change_required(self, ent_type):
# zfs allow/unallow are idempotent, so we only need to do this for Ansible's changed flag
rval = []
if ent_type == 'u':
entities = self.users
elif ent_type == 'g':
entities = self.groups
for ent in entities:
ent_perms = self.current_perms[self.scope][ent_type].get(ent, None)
if self.state == 'present' and ent_perms is None:
rval.append(ent)
elif self.state == 'absent' and ent_perms is not None:
rval.append(ent)
elif ent_perms is not None:
for perm in self.perms:
if ((self.state == 'present' and perm not in ent_perms) or
(self.state == 'absent' and perm in ent_perms)):
# at least one desired permission is absent, or
# at least one undesired permission is present
rval.append(ent)
break
return rval
def run(self):
def run_cmd(args):
cmd = ['zfs', self.subcommand] + self.recursive_opt + ['-%s' % self.scope] + args
if self.perms:
cmd = cmd + [','.join(self.perms)]
cmd = cmd + [self.name]
if self.module.check_mode:
return 'Check mode skipped execution of: %s' % ' '.join(cmd)
rc, out, err = self.run_command(cmd)
if rc:
msg = 'Changing permissions with `%s` failed: %s' % (' '.join(cmd), err)
self.module.fail_json(msg=msg)
return out
stdout = ''
for ent_type in ('u', 'g'):
change = self.change_required(ent_type)
if change:
|
Visrozar/DjangoRecommender
|
shop/migrations/0003_auto_20170217_1533.py
|
Python
|
mit
| 1,327
| 0.003014
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-17 10:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('shop', '0002_shops_longitude'),
]
    operations = [
migrations.CreateModel(
name='ShopsProducts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.AddField(
model_name='products',
name='description',
field=models.CharField(default=0, max_length=400),
),
migrations.AddField(
model_name='shopsproducts',
name='product',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.Products'),
),
migrations.AddField(
model_name='shopsproducts',
name='shop',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.Shops'),
),
migrations.AddField(
model_name='products',
name='shops',
field=models.ManyToManyField(through='shop.ShopsProducts', to='shop.Shops'),
),
]
|
Juniper/nova
|
nova/virt/libvirt/volume/iscsi.py
|
Python
|
apache-2.0
| 3,538
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Libvirt volume driver for iSCSI"""
from os_brick import exception as os_brick_exception
from os_brick.initiator import connector
from oslo_log import log as logging
import nova.conf
from nova import utils
from nova.virt.libvirt.volume import volume as libvirt_volume
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class LibvirtISCSIVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
def __init__(self, host):
super(LibvirtISCSIVolumeDriver, self).__init__(host,
is_block_dev=True)
# Call the factory here so we can support
# more than x86 architectures.
self.connector = connector.InitiatorConnector.factory(
'ISCSI', utils.get_root_helper(),
use_multipath=CONF.libvirt.volume_use_multipath,
device_scan_attempts=CONF.libvirt.num_volume_scan_tries,
transport=self._get_transport())
def _get_transport(self):
if CONF.libvirt.iscsi_iface:
transport = CONF.libvirt.iscsi_iface
else:
transport = 'default'
return transport
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtISCSIVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "block"
conf.source_path = connection_info['data']['device_path']
conf.driver_io = "native"
return conf
def connect_volume(self, connection_info, disk_info, instance):
"""Attach the volume to instance_name."""
LOG.debug("Calling os-brick to attach iSCSI Volume")
device_info = self.connector.connect_volume(connection_info['data'])
LOG.debug("Attached iSCSI volume %s", device_info)
connection_info['data']['device_path'] = device_info['path']
def disconnect_volume(self, connection_info, disk_dev, instance):
"""Detach the volume from instance_name."""
LOG.debug("calling os-brick to detach iSCSI Volume")
try:
self.connector.disconnect_volume(connection_info['data'], None)
except os_brick_exception.VolumeDeviceNotFound as exc:
LOG.warning('Ignoring VolumeDeviceNotFound: %s', exc)
return
LOG.debug("Disconnected iSCSI Volume %s", disk_dev)
super(LibvirtISCSIVolumeDriver,
self).disconnect_volume(connection_info, disk_dev, instance)
def extend_volume(self, connection_info, instance):
"""Extend the volume."""
LOG.debug("calling os-brick to extend iSCSI Volume", instance=instance)
new_size = self.connector.extend_volume(connection_info['data'])
LOG.debug("Extend iSCSI Volume %s; new_size=%s",
connection_info['data']['device_path'],
new_size, instance=instance)
return new_size
|
REANNZ/faucet
|
faucet/acl.py
|
Python
|
apache-2.0
| 33,575
| 0.001519
|
"""Configuration for ACLs."""
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2019 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import netaddr
from os_ken.ofproto import ether
from faucet import valve_of
from faucet import valve_acl
from faucet.valve_of import MATCH_FIELDS, OLD_MATCH_FIELDS
from faucet.conf import Conf, test_config_condition, InvalidConfigError
from faucet.valve_table import wildcard_table
class ACL(Conf):
"""Contains the state for an ACL, including the configuration.
ACL Config
ACLs are configured under the 'acls' configuration block. The acls block
contains a dictionary of individual acls each keyed by its name.
Each acl contains a list of rules, a packet will have the first matching rule
applied to it.
Each rule is a dictionary containing the single key 'rule' with the value the
matches and actions for the rule.
The matches are key/values based on the ryu RESTFul API.
The key 'actions' contains a dictionary with keys/values as follows:
* allow (int): if 1 allow the packet to continue through the Faucet \
pipeline, if 0 drop the packet.
* force_port_vlan (int): if 1, do not verify the VLAN/port association \
for this packet and override any VLAN ACL on the forced VLAN.
* meter (str): meter to apply to the packet
* output (dict): used to output a packet directly. details below.
* cookie (int): set flow cookie to this value on this flow
The output action contains a dictionary with the following elements:
* tunnel (dict): the tunnel formation, creates a tunnel from the applied port(s) \
to the specified destination
* port (int or string): the port to output the packet to
* ports (list): a list of the ports (int or string) to output the packet to
* set_fields (list): a list of fields to set with values
* pop_vlans: (int): pop the packet vlan before outputting
* vlan_vid: (int): push the vlan vid on the packet when outputting
* vlan_vids: (list): push the list of vlans on the packet when outputting, with option eth_type
* swap_vid (int): rewrite the vlan vid of the packet when outputting
* failover (dict): Output with a failover port (experimental)
"""
defaults = {
'rules': None,
'exact_match': False,
'dot1x_assigned': False,
}
defaults_types = {
'rules': list,
'exact_match': bool,
'dot1x_assigned': bool,
}
rule_types = {
'cookie': int,
'actions': dict,
'description': str,
}
actions_types = {
'meter': str,
'mirror': (str, int),
'output': (dict, list),
'allow': int,
'force_port_vlan': int,
'ct': dict,
}
output_actions_types = {
'tunnel': dict,
'port': (str, int),
'ports': list,
'failover': dict,
'set_fields': list,
'pop_vlans': int,
'swap_vid': int,
'vlan_vid': int,
'vlan_vids': list,
}
ct_action_types = {
'flags': int,
'alg': int,
'table': int,
'zone': int,
'zone_src': int,
'clear': bool,
'nat': dict,
}
ct_action_nat_types = {
'flags': int,
'range_ipv4_min': str,
'range_ipv4_max': str,
'range_ipv6_min': str,
'range_ipv6_max': str,
'range_proto_min': int,
'range_proto_max': int
}
tunnel_types = {
'type': (str, None),
'tunnel_id': (str, int, None),
'dp': str,
'port': (str, int, None),
'exit_instructions': (list, None),
'maintain_encapsulation': bool,
'bi_directional': bool,
'reverse': bool,
}
mutable_attrs = frozenset(['tunnel_sources'])
def __init__(self, _id, dp_id, conf):
self.rules = []
self.exact_match = None
self.dot1x_assigned = None
self.meter = False
self.matches = {}
self.set_fields = set()
self._ports_resolved = False
# Tunnel info maintains the tunnel output information for each tunnel rule
self.tunnel_dests = {}
# Tunnel sources is a list of the sources in the network for this ACL
self.tunnel_sources = {}
# Tunnel rules is the rules for each tunnel in the ACL for each source
self.dyn_tunnel_rules = {}
self.dyn_reverse_tunnel_rules = {}
for match_fields in (MATCH_FIELDS, OLD_MATCH_FIELDS):
self.rule_types.update({match: (str, int) for match in match_fields})
conf = copy.deepcopy(conf)
if isinstance(conf, dict):
rules = conf.get('rules', [])
elif isinstance(conf, list):
rules = conf
conf = {}
else:
raise InvalidConfigError(
'ACL conf is an invalid type %s' % _id)
conf['rules'] = []
for rule in rules:
normalized_rule = rule
if isinstance(rule, dict):
normalized_rule = rule.get('rule', rule)
if normalized_rule is None:
normalized_rule = {k: v for k, v in rule.items() if v is not None}
test_config_condition(not isinstance(normalized_rule, dict), (
'ACL rule is %s not %s (%s)' % (type(normalized_rule), dict, rules)))
conf['rules'].append(normalized_rule)
super().__init__(_id, dp_id, conf)
def finalize(self):
self._ports_resolved = True
super().finalize()
def check_config(self):
test_config_condition(
not self.rules, 'no rules found for ACL %s' % self._id)
for rule in self.rules:
self._check_conf_types(rule, self.rule_types)
for rule_field, rule_conf in rule.items():
if rule_field == 'cookie':
test_config_condition(
rule_conf < 0 or rule_conf > 2**16,
                        'rule cookie value must be 0-2**16')
elif rule_field == 'actions':
test_config_condition(
not rule_conf,
'Missing rule actions in ACL %s' % self._id)
self._check_conf_types(rule_conf, self.actions_types)
for action_name, action_conf in rule_conf.items():
if action_name == 'output':
if isinstance(action_conf, (list, tuple)):
# New ordered format
for subconf in action_conf:
# Make sure only one specified action per list element
test_config_condition(
len(subconf) > 1,
'ACL ordered output must have only one action per element')
# Ensure correct action format
self._check_conf_types(subconf, self.output_actions_types)
else:
# Old format
self._check_conf_types(
action_conf, self.output_actions_types)
elif action_name == 'ct':
self._check_conf_types(action_conf, self.ct_action_types)
# if clear set, make sure nothing else is
if 'clear' in action_conf and action_conf['clear']:
test_config_condition(
|
josephmjoy/robotics
|
python_robotutils/robotutils/strmap_helper.py
|
Python
|
mit
| 3,444
| 0.003194
|
"""
This module contains a helper to extract various kinds of primitive data types
from a dictionary of strings.
"""
class StringDictHelper:
"""
Helper class to extract primitive types from a dictionary of strings. This is a port
of Java robotutils class StringmapHelper. The special values 'true' and 'false' (in
any combinations of case) represent boolean True and False. This MUST NOT be changed
as it is part of the 'structured message' used in the robotcomm protocol and in
in configuration and logging - across multiple languages.
"""
def __init__(self, dct):
"""Constructs a helper for the given dict {dct}"""
self._dct = dct
def get_as_str(self, key, default, pattern=None):
"""
        Returns a string - either parsed from map of {key} or {default}.
param key -- key to lookup.
default -- default value to use if the key did not exist, the value was not
                    parseable or did not match {pattern}. This value does not need
                    to match {pattern}.
pattern -- [If not None] Regex.Pattern object representing valid strings to accept.
"""
ret = str(self._dct.get(key, default))
if pattern:
return ret if pattern.fullmatch(ret) else default
return ret
def get_as_bool(self, key, default):
"""
Returns a bool - either parsed from map of {key} or {default}.
key -- key to lookup.
default -- default value to use if the key did not exist or the value was not
parseable.
"""
val = self._dct.get(key)
ret = default
if val:
val = val.lower()
if val == 'true':
ret = True
elif val == 'false':
ret = False
return ret
def get_as_num(self, key, default, minval=None, maxval=None):
"""
Returns a number - either parsed from map of {key} or {default}.
key -- key to lookup.
        default -- default value to use if the key did not exist, the value was not
parseable or out of bounds. This value does not need to be between
{minval} and {maxval}.
NOTE: The *type* of this default value is used to
determine the type of return value. So, if a floating point value is expected,
specify a float default value!
[minval] -- Optional inclusive minimum to accept.
[maxval] -- Optional inclusive (not exclusive) maximum to accept.
"""
val = self._dct.get(key)
ret = default
if val:
try:
# Below we extract type (int or float or ??) and use it to construct the result!
                type_ = type(default)
                ret1 = type_(val)
valid = (minval is None or ret1 >= minval) and (maxval is None or ret1 <= maxval)
ret = ret1 if valid else default
except ValueError:
ret = default
return ret
if __name__ == '__main__':
D = dict(a='abc', b='true', c=42, d=1.5)
H = StringDictHelper(D)
AV = H.get_as_str('a', '')
BV = H.get_as_bool('b', False)
CV = H.get_as_num('c', 100)
DV = H.get_as_num('d', 0.0)
assert AV == 'abc'
assert BV is True
assert CV == 42
assert abs(DV-1.5) < 1E-10
print("StringDictHelper seems to work")
|
wubr2000/googleads-python-lib
|
examples/dfp/v201411/audience_segment_service/get_all_audience_segments.py
|
Python
|
apache-2.0
| 1,845
| 0.009214
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all audience segments.
To create audience segments, run create_audience_segments.py.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize client object.
client = dfp.DfpClient.LoadFromStorage()
# Initialize appropriate service.
audience_segment_service = client.GetService(
'AudienceSegmentService', version='v201411')
# Create statement object to select all audience segments.
statement = dfp.FilterStatement()
# Get audience segments by statement.
while True:
response = audience_segment_service.getAudienceSegmentsByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for segment in response['results']:
print ('Audience segment with id \'%s\' and name '
'\'%s\' of size %s was found.' %
               (segment['id'], segment['name'], segment['size']))
      statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
|
bendavis78/gnome-tweak-tool
|
gtweak/tweaks/tweak_group_shell_extensions.py
|
Python
|
gpl-3.0
| 13,162
| 0.004862
|
import os.path
import zipfile
import tempfile
import logging
import json
from gi.repository import Gtk
from gi.repository import GLib
from gi.repository import Pango
from operator import itemgetter
from gtweak.utils import extract_zip_file, execute_subprocess
from gtweak.gshellwrapper import GnomeShell, GnomeShellFactory
from gtweak.tweakmodel import Tweak
from gtweak.widgets import FileChooserButton, build_label_beside_widget, build_horizontal_sizegroup, build_tight_button, UI_BOX_SPACING, ListBoxTweakGroup
from gtweak.egowrapper import ExtensionsDotGnomeDotOrg
from gtweak.utils import DisableExtension
def N_(x): return x
def _fix_shell_version_for_ego(version):
#extensions.gnome.org uses a weird versioning system,
#3.10.0 is 3.10, 3.10.0.x (x is ignored)
#drop the pico? release
version = '.'.join(version.split('.')[0:3])
if version[-1] == '0':
#if it is .0, drop that too
return '.'.join(version.split('.')[0:2])
else:
return version
class _ShellExtensionTweak(Gtk.ListBoxRow, Tweak):
def __init__(self, shell, ext, **options):
Gtk.ListBoxRow.__init__(self)
Tweak.__init__(self, ext["name"], ext.get("description",""), **options)
self.hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
self.hbox.props.border_width = 10
self.hbox.props.spacing = UI_BOX_SPACING
self._shell = shell
state = ext.get("state")
uuid = ext["uuid"]
sw = Gtk.Switch()
sw.props.vexpand = False
sw.props.valign = Gtk.Align.CENTER
sw.set_active(self._shell.extension_is_active(state, uuid))
sw.connect('notify::active', self._on_extension_toggled, uuid)
self.hbox.pack_start(sw, False, False, 0)
vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
lbl_name = Gtk.Label(xalign=0.0)
lbl_name.set_markup("<span size='medium'><b>"+ext["name"].lower().capitalize()+"</b></span>")
lbl_desc = Gtk.Label(xalign=0.0)
desc = ext["description"].lower().capitalize().split('\n')[0]
lbl_desc.set_markup("<span foreground='#A19C9C' size='small'>"+desc+"</span>")
lbl_desc.props.ellipsize = Pango.EllipsizeMode.END
vbox.pack_start(lbl_name, False, False, 0)
vbox.pack_start(lbl_desc, False, False, 0)
self.hbox.pack_start(vbox, True, True, 10)
info = None
warning = None
sensitive = False
if state == GnomeShell.EXTENSION_STATE["ENABLED"] or \
state == GnomeShell.EXTENSION_STATE["DISABLED"] or \
state == GnomeShell.EXTENSION_STATE["INITIALIZED"]:
sensitive = True
elif state == GnomeShell.EXTENSION_STATE["DOWNLOADING"]:
info = _("Extension downloading")
elif state == GnomeShell.EXTENSION_STATE["ERROR"]:
warning = _("Error loading extension")
elif state == GnomeShell.EXTENSION_STATE["OUT_OF_DATE"]:
warning = _("Extension does not support shell version")
else:
warning = _("Unknown extension error")
logging.critical(warning)
sw.set_sensitive(sensitive)
if info:
inf = self.make_image("dialog-information-symbolic", info)
self.hbox.pack_start(inf, False, False, 0)
if warning:
wg = self.make_image("dialog-warning-symbolic", warning)
self.hbox.pack_start(wg, False, False, 0)
if self._shell.SUPPORTS_EXTENSION_PREFS:
prefs = os.path.join(ext['path'], "prefs.js")
if os.path.exists(prefs):
icon = Gtk.Image()
                icon.set_from_icon_name("emblem-system-symbolic", Gtk.IconSize.BUTTON)
btn = Gtk.Button()
btn.props.vexpand = False
btn.props.valign = Gtk.Align.CENTER
                btn.add(icon)
btn.connect("clicked", self._on_configure_clicked, uuid)
self.hbox.pack_start(btn, False, False, 0)
btn = Gtk.Button(_("Remove"))
btn.props.vexpand = False
btn.props.valign = Gtk.Align.CENTER
btn.set_sensitive(False)
self.hbox.pack_start(btn, False, False, 0)
if ext.get("type") == GnomeShell.EXTENSION_TYPE["PER_USER"]:
btn.get_style_context().add_class("suggested-action")
btn.set_sensitive(True)
btn.connect("clicked", self._on_extension_delete, uuid, ext["name"])
self.deleteButton = btn
de = DisableExtension()
de.connect('disable-extension', self._on_disable_extension, sw)
self.add(self.hbox)
self.widget_for_size_group = None
def _on_disable_extension(self, de, sw):
sw.set_active(False)
def _on_configure_clicked(self, btn, uuid):
execute_subprocess(['gnome-shell-extension-prefs', uuid], block=False)
def _on_extension_toggled(self, sw, active, uuid):
if not sw.get_active():
self._shell.disable_extension(uuid)
else:
self._shell.enable_extension(uuid)
def _on_extension_delete(self, btn, uuid, name):
path = os.path.join(self._shell.EXTENSION_DIR, uuid)
if os.path.exists(path):
first_message = _("Uninstall Extension")
second_message = _("Do you want to uninstall the '%s' extension?") % name
dialog = Gtk.MessageDialog(
self.main_window,0,
type=Gtk.MessageType.QUESTION,
buttons=Gtk.ButtonsType.YES_NO,
message_format=first_message)
dialog.format_secondary_text(second_message)
response = dialog.run()
if response == Gtk.ResponseType.YES:
self._shell.uninstall_extension(uuid)
self.set_sensitive(False)
btn.get_style_context().remove_class("suggested-action")
dialog.destroy()
def _on_extension_update(self, btn, uuid):
self._shell.uninstall_extension(uuid)
btn.get_style_context().remove_class("suggested-action")
btn.set_label(_("Updating"))
self.set_sensitive(False)
self._shell.install_remote_extension(uuid,self.reply_handler, self.error_handler, btn)
def reply_handler(self, proxy_object, result, user_data):
if result == 's':
self.deleteButton.show()
user_data.hide()
self.set_sensitive(True)
def error_handler(self, proxy_object, result, user_data):
user_data.set_label(_("Error"))
print result
def add_update_button(self, uuid):
self.deleteButton.hide()
updateButton = Gtk.Button(_("Update"))
updateButton.get_style_context().add_class("suggested-action")
updateButton.connect("clicked", self._on_extension_update, uuid)
updateButton.show()
self.hbox.pack_end(updateButton, False, False, 0)
def make_image(self, icon, tip):
image = Gtk.Image.new_from_icon_name(icon, Gtk.IconSize.MENU)
image.set_tooltip_text(tip)
return image
class _ShellExtensionInstallerTweak(Gtk.Box, Tweak):
def __init__(self, shell, **options):
Gtk.Box.__init__(self, orientation=Gtk.Orientation.HORIZONTAL)
Tweak.__init__(self, _("Install Shell Extension"), "", **options)
self._shell = shell
chooser = FileChooserButton(
_("Select an extension"),
True,
["application/zip"])
chooser.connect("file-set", self._on_file_set)
hb = Gtk.HBox(spacing=UI_BOX_SPACING)
hb.pack_start(
Gtk.LinkButton.new_with_label("https://extensions.gnome.org",_("Get more extensions")),
False, False, 0)
hb.pack_start(chooser, False, False, 0)
build_label_beside_widget(self.name, hb, hbox=self)
self.widget_for_size_group = hb
self.loaded = self._shell is not None
def _on_file_set(self, chooser):
f = chooser.get_filename()
|
skosukhin/spack
|
lib/spack/spack/cmd/gpg.py
|
Python
|
lgpl-2.1
| 6,389
| 0
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack.util.gpg import Gpg
import argparse
import spack
import os
description = "handle GPG actions for spack"
section = "developer"
level = "long"
def setup_parser(subparser):
setup_parser.parser = subparser
subparsers = subparser.add_subparsers(help='GPG sub-commands')
verify = subparsers.add_parser('verify')
verify.add_argument('package', type=str,
help='the package to verify')
verify.add_argument('signature', type=str, nargs='?',
help='the signature file')
verify.set_defaults(func=gpg_verify)
trust = subparsers.add_parser('trust')
trust.add_argument('keyfile', type=str,
help='add a key to the trust store')
trust.set_defaults(func=gpg_trust)
untrust = subparsers.add_parser('untrust')
untrust.add_argument('--signing', action='store_true',
help='allow untrusting signing keys')
untrust.add_argument('keys', nargs='+', type=str,
help='remove keys from the trust store')
untrust.set_defaults(func=gpg_untrust)
sign = subparsers.add_parser('sign')
sign.add_argument('--output', metavar='DEST', type=str,
help='the directory to place signatures')
sign.add_argument('--key', metavar='KEY', type=str,
help='the key to use for signing')
sign.add_argument('--clearsign', action='store_true',
help='if specified, create a clearsign signature')
sign.add_argument('package', type=str,
help='the package to sign')
sign.set_defaults(func=gpg_sign)
create = subparsers.add_parser('create')
create.add_argument('name', type=str,
help='the name to use for the new key')
create.add_argument('email', type=str,
help='the email address to use for the new key')
create.add_argument('--comment', metavar='COMMENT', type=str,
default='GPG created for Spack',
help='a description for the intended use of the key')
create.add_argument('--expires', metavar='EXPIRATION', type=str,
default='0', help='when the key should expire')
create.add_argument('--export', metavar='DEST', type=str,
help='export the public key to a file')
create.set_defaults(func=gpg_create)
list = subparsers.add_parser('list')
list.add_argument('--trusted', action='store_true',
default=True, help='list trusted keys')
list.add_argument('--signing', action='store_true',
help='list keys which may be used for signing')
list.set_defaults(func=gpg_list)
init = subparsers.add_parser('init')
init.add_argument('--from', metavar='DIR', type=str,
dest='import_dir', help=argparse.SUPPRESS)
init.set_defaults(func=gpg_init)
export = subparsers.add_parser('export')
export.add_argument('location', type=str,
help='where to export keys')
export.add_argument('keys', nargs='*',
help='the keys to export; '
                             'all secret keys if unspecified')
export.set_defaults(func=gpg_export)
def gpg_create(args):
if args.export:
old_sec_keys = Gpg.signing_keys()
Gpg.create(name=args.name, email=args.email,
comment=args.comment, expires=args.expires)
if args.export:
new_sec_keys = set(Gpg.signing_keys())
        new_keys = new_sec_keys.difference(old_sec_keys)
Gpg.export_keys(args.export, *new_keys)
def gpg_export(args):
keys = args.keys
if not keys:
keys = Gpg.signing_keys()
Gpg.export_keys(args.location, *keys)
def gpg_list(args):
Gpg.list(args.trusted, args.signing)
def gpg_sign(args):
key = args.key
if key is None:
keys = Gpg.signing_keys()
if len(keys) == 1:
key = keys[0]
elif not keys:
raise RuntimeError('no signing keys are available')
else:
raise RuntimeError('multiple signing keys are available; '
'please choose one')
output = args.output
if not output:
output = args.package + '.asc'
# TODO: Support the package format Spack creates.
Gpg.sign(key, args.package, output, args.clearsign)
def gpg_trust(args):
Gpg.trust(args.keyfile)
def gpg_init(args):
import_dir = args.import_dir
if import_dir is None:
import_dir = spack.gpg_keys_path
for root, _, filenames in os.walk(import_dir):
for filename in filenames:
if not filename.endswith('.key'):
continue
Gpg.trust(os.path.join(root, filename))
def gpg_untrust(args):
Gpg.untrust(args.signing, *args.keys)
def gpg_verify(args):
# TODO: Support the package format Spack creates.
signature = args.signature
if signature is None:
signature = args.package + '.asc'
Gpg.verify(signature, args.package)
def gpg(parser, args):
if args.func:
args.func(args)
|
petrjasek/superdesk-core
|
superdesk/system/health.py
|
Python
|
agpl-3.0
| 1,753
| 0
|
"""Health Check API
Use to check system status, will report "green" or "red" for each component
plus overall for "status"::
{
"status": "green",
"celery": "green",
"elastic": "green",
"mongo": "green",
"redis": "green"
}
"""
import logging
import superdesk
from typing import Callable, List, Tuple
from flask import Blueprint, current_app as app
bp = Blueprint("system", __name__)
logger = logging.getLogger(__name__)
def mongo_health() -> bool:
info = app.data.mongo.pymongo().cx.server_info()
return bool(info["ok"])
def elastic_health() -> bool:
health = app.data.elastic.es.cluster.health()
return health["status"] in ("green", "yellow")
def celery_health() -> bool:
with app.celery.connection_for_write() as conn:
conn.connect()
return conn.connected
def redis_health() -> bool:
info = app.redis.info()
return bool(info)
def human(status: bool) -> str:
return "green" if status else "red"
checks: List[Tuple[str, Callable[[], bool]]] = [
("mongo", mongo_health),
("elastic", elastic_health),
("celery", celery_health),
("redis", redis_health),
]
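# A minimal sketch (an assumption, not part of the original module) of how an extra
# component could plug into the same report: any zero-argument callable returning bool
# can be paired with a name in `checks`. The example below is illustrative only and is
# deliberately not registered.
def _example_health() -> bool:
    # Hypothetical probe; replace with a real connectivity check before appending
    # ("example", _example_health) to `checks`.
    return True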
@bp.route("/system/health", methods=["GET", "OPTIONS"])
def health():
output = {
"application_name": app.config.get("APPLICATION_NAME"),
}
status = True
for key, check_func in checks:
try:
result = check_func()
except Exception as err:
            logger.exception("error checking %s: %s", key, err)
result = False
status = status and result
output[key] = human(result)
output["status"] = human(status)
return output
def init_app(app) -> None:
superdesk.blueprint(bp, app)
|
DawudH/scrapy_real-estate
|
plot/print_progressbar.py
|
Python
|
mit
| 1,262
| 0.014286
|
# -*- coding: utf-8 -*-
import sys
def print_progress (iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 100):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
barLength - Optional : character length of bar (Int)
copied from: http://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
    With slight adjustment so that it allows just one iteration (total = 0)
"""
formatStr = "{0:." + str(decimals) + "f}"
percent = formatStr.format(100 * (iteration / float(total))) if not total == 0 else formatStr.format(100)
filledLength = int(round(barLength * iteration / float(total))) if not total == 0 else int(round(barLength))
bar = '█' * filledLength + '-' * (barLength - filledLength)
sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percent, '%', suffix)),
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
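# Minimal usage sketch (an assumption, not part of the original module): drive the bar
# over a fixed number of steps; the step count, labels and sleep interval are arbitrary.
if __name__ == '__main__':
    from time import sleep
    total = 20
    for i in range(total + 1):
        print_progress(i, total, prefix='Progress:', suffix='Complete', barLength=40)
        sleep(0.05)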
|
mdblv2/joatu-django
|
application/joatu/local_settings.py
|
Python
|
apache-2.0
| 2,019
| 0.004458
|
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('studio', 'mdbl@live.com'),
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '/Users/studio/Sites/joatu-master/database.db', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
ALLOWED_HOSTS = ['.joatu.azurewebsites.net']
# Make this unique, and don't share it with anybody.
SECRET_KEY = '@h8_wz=yshx96$%%tm$id#96gbllw3je7)%fhx@lja+_c%_(n&'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
('common', '/Users/studio/Sites/joatu-master/static/img/common'),
('css', '/Users/studio/Sites/joatu-master/static/css'),
)
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
STATIC_URL = '/static/'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = '/Users/studio/Sites/joatu-master/media/'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
'/Users/studio/Sites/joatu-master/templates',
)
|
zrzka/blackmamba
|
blackmamba/log.py
|
Python
|
mit
| 2,546
| 0.000393
|
#!python3
"""Logging module.
**This module must not introduce dependency on any other Black Mamba
modules and must be importable on any other platform**.
Why custom module instead of the bundled one? Several reasons:
* not to interfere with Pythonista logging settings,
* unable to convince Pythonista to use my colors,
* etc.
Default log level is INFO. You can use `blackmamba.log.set_level`
to change effective log level. Available log levels are:
* `ERROR`
* `WARNING`
* `INFO`
* `DEBUG`
* `NOTSET`
If you'd like to silent Black Mamba messages, it's recommended to set log
level to `ERROR`::
import blackmamba.log as log
log.set_level(log.ERROR)
"""
try:
import console
except ImportError:
console = None
ERROR = 40
"""Only errors are logged.""
|
"
WARNING = 30
"""Only warnings and errors are logged."""
INFO = 20
"""Informational messages, warnings and errors are logged."""
DEBUG = 10
"""Debug, information messages, warnings and errors are logged."""
NOTSET = 0
"""All messages are logged."""
_level = INFO
_COLORS = {
WARNING: (1, 0.5, 0),
ERROR: (1, 0, 0)
}
def get_level() -> int:
"""Return effective log level.
Returns:
Effective log level.
"""
return _level
def set_level(level: int):
"""Set effective log level.
Args:
level: Log level to set.
"""
global _level
_level = level
def _log(level, *args, **kwargs):
if _level > level:
return
color = _COLORS.get(level, None)
if console and color:
console.set_color(*color)
print(*args, **kwargs)
if console and color:
console.set_color()
def debug(*args, **kwargs):
"""Log message with `DEBUG` level.
Args:
args: Passed to `print`.
kwargs: Passed to `print`.
"""
_log(DEBUG, *args, **kwargs)
def info(*args, **kwargs):
"""Log message with `INFO` level.
Args:
args: Passed to `print`.
kwargs: Passed to `print`.
"""
_log(INFO, *args, **kwargs)
def warn(*args, **kwargs):
"""Log message with `WARNING` level.
Args:
args: Passed to `print`.
kwargs: Passed to `print`.
"""
_log(WARNING, *args, **kwargs)
def error(*args, **kwargs):
"""Log message with `ERROR` level.
Args:
args: Passed to `print`.
kwargs: Passed to `print`.
"""
_log(ERROR, *args, **kwargs)
def issue(*args, **kwargs):
error(*args, **kwargs)
error('Please, file an issue at {}'.format('https://github.com/zrzka/blackmamba/issues'))
|
novas0x2a/ctypesgen
|
ctypesgencore/parser/cparser.py
|
Python
|
bsd-3-clause
| 6,868
| 0.00364
|
#!/usr/bin/env python
'''
Parse a C source file.
To use, subclass CParser and override its handle_* methods. Then instantiate
the class with a string to parse.
'''
__docformat__ = 'restructuredtext'
import operator
import os.path
import re
import sys
import time
import warnings
import preprocessor
import yacc
import cgrammar
import cdeclarations
# --------------------------------------------------------------------------
# Lexer
# --------------------------------------------------------------------------
class CLexer(object):
def __init__(self, cparser):
self.cparser = cparser
self.type_names = set()
self.in_define = False
def input(self, tokens):
self.tokens = tokens
self.pos = 0
def token(self):
while self.pos < len(self.tokens):
t = self.tokens[self.pos]
self.pos += 1
if not t:
break
if t.type == 'PP_DEFINE':
self.in_define = True
elif t.type == 'PP_END_DEFINE':
self.in_define = False
# Transform PP tokens into C tokens
elif t.type == 'LPAREN':
t.type = '('
elif t.type == 'PP_NUMBER':
t.type = 'CONSTANT'
elif t.type == 'IDENTIFIER' and t.value in cgrammar.keywords:
t.type = t.value.upper()
elif t.type == 'IDENTIFIER' and t.value in self.type_names:
if (self.pos < 2 or self.tokens[self.pos-2].type not in
('ENUM', 'STRUCT', 'UNION')):
t.type = 'TYPE_NAME'
t.lexer = self
t.clexpos = self.pos - 1
return t
return None
# --------------------------------------------------------------------------
# Parser
# --------------------------------------------------------------------------
class CParser(object):
'''Parse a C source file.
Subclass and override the handle_* methods. Call `parse` with a string
to parse.
'''
def __init__(self, options, stddef_types=True, gnu_types=True):
self.preprocessor_parser = preprocessor.PreprocessorParser(options,self)
self.parser = yacc.Parser()
prototype = yacc.yacc(method = 'LALR',
debug = False,
module = cgrammar,
write_tables = True,
outputdir = os.path.dirname(__file__),
optimize = True)
# If yacc is reading tables from a file, then it won't find the error
# function... need to set it manually
prototype.errorfunc = cgrammar.p_error
prototype.init_parser(self.parser)
self.parser.cparser = self
self.lexer = CLexer(self)
if stddef_types:
self.lexer.type_names.add('wchar_t')
self.lexer.type_names.add('ptrdiff_t')
self.lexer.type_names.add('size_t')
if gnu_types:
self.lexer.type_names.add('__builtin_va_list')
if sys.platform == 'win32':
self.lexer.type_names.add('__int64')
def parse(self, filename, debug=False):
'''Parse a file.
If `debug` is True, parsing state is dumped to stdout.
'''
self.handle_status('Preprocessing %s' % filename)
self.preprocessor_parser.parse(filename)
self.lexer.input(self.preprocessor_parser.output)
self.handle_status('Parsing %s' % filename)
self.parser.parse(lexer=self.lexer, debug=debug)
# ----------------------------------------------------------------------
# Parser interface. Override these methods in your subclass.
# ----------------------------------------------------------------------
def handle_error(self, message, filename, lineno):
        '''A parse error occurred.
The default implementation prints `lineno` and `message` to stderr.
The parser will try to recover from errors by synchronising at the
next semicolon.
'''
print >> sys.stderr, '%s:%s %s' % (filename, lineno, message)
def handle_pp_error(self, message):
'''The C preprocessor emitted an error.
        The default implementation prints the error to stderr. If processing
can continue, it will.
'''
print >> sys.stderr, 'Preprocessor:', message
def handle_status(self, message):
'''Progress information.
        The default implementation prints the message to stderr.
'''
print >> sys.stderr, message
def handle_define(self, name, params, value, filename, lineno):
'''#define `name` `value`
or #define `name`(`params`) `value`
name is a string
params is None or a list of strings
value is a ...?
'''
def handle_define_constant(self, name, value, filename, lineno):
'''#define `name` `value`
name is a string
value is an ExpressionNode or None
'''
def handle_define_macro(self, name, params, value, filename, lineno):
'''#define `name`(`params`) `value`
name is a string
params is a list of strings
value is an ExpressionNode or None
'''
def impl_handle_declaration(self, declaration, filename, lineno):
'''Internal method that calls `handle_declaration`. This method
also adds any new type definitions to the lexer's list of valid type
names, which affects the parsing of subsequent declarations.
'''
if declaration.storage == 'typedef':
declarator = declaration.declarator
if not declarator:
# XXX TEMPORARY while struct etc not filled
return
while declarator.pointer:
declarator = declarator.pointer
self.lexer.type_names.add(declarator.identifier)
self.handle_declaration(declaration, filename, lineno)
def handle_declaration(self, declaration, filename, lineno):
'''A declaration was encountered.
`declaration` is an instance of Declaration. Where a declaration has
multiple initialisers, each is returned as a separate declaration.
'''
pass
class DebugCParser(CParser):
'''A convenience class that prints each invocation of a handle_* method to
stdout.
'''
def handle_define(self, name, value, filename, lineno):
print '#define name=%r, value=%r' % (name, value)
def handle_define_constant(self, name, value, filename, lineno):
        print '#define constant name=%r, value=%r' % (name, value)
def handle_declaration(self, declaration, filename, lineno):
print declaration
if __name__ == '__main__':
DebugCParser().parse(sys.argv[1], debug=True)
|
fredmorcos/attic
|
snippets/python/py-des/test_pydes.py
|
Python
|
isc
| 9,694
| 0.030638
|
from pyDes import *
#############################################################################
# Examples #
#############################################################################
def _example_triple_des_():
from time import time
# Utility module
from binascii import unhexlify as unhex
# example shows triple-des encryption using the des class
print ("Example of triple DES encryption in default ECB mode (DES-EDE3)\n")
print ("Triple des using the des class (3 times)")
t = time()
k1 = des(unhex("133457799BBCDFF1"))
k2 = des(unhex("1122334455667788"))
k3 = des(unhex("77661100DD223311"))
d = "Triple DES test string, to be encrypted and decrypted..."
print ("Key1: %r" % k1.getKey())
print ("Key2: %r" % k2.getKey())
print ("Key3: %r" % k3.getKey())
print ("Data: %r" % d)
e1 = k1.encrypt(d)
e2 = k2.decrypt(e1)
e3 = k3.encrypt(e2)
print ("Encrypted: %r" % e3)
d3 = k3.decrypt(e3)
d2 = k2.encrypt(d3)
d1 = k1.decrypt(d2)
print ("Decrypted: %r" % d1)
print ("DES time taken: %f (%d crypt operations)" % (time() - t, 6 * (len(d) / 8)))
print ("")
# Example below uses the triple-des class to achieve the same as above
print ("Now using triple des class")
t = time()
t1 = triple_des(unhex("133457799BBCDFF1112233445566778877661100DD223311"))
print ("Key: %r" % t1.getKey())
print ("Data: %r" % d)
td1 = t1.encrypt(d)
print ("Encrypted: %r" % td1)
td2 = t1.decrypt(td1)
print ("Decrypted: %r" % td2)
print ("Triple DES time taken: %f (%d crypt operations)" % (time() - t, 6 * (len(d) / 8)))
def _example_des_():
from time import time
# example of DES encrypting in CBC mode with the IV of "\0\0\0\0\0\0\0\0"
print ("Example of DES encryption using CBC mode\n")
t = time()
k = des("DESCRYPT", CBC, "\0\0\0\0\0\0\0\0")
data = "DES encryption algorithm"
print ("Key : %r" % k.getKey())
print ("Data : %r" % data)
d = k.encrypt(data)
print ("Encrypted: %r" % d)
d = k.decrypt(d)
print ("Decrypted: %r" % d)
print ("DES time taken: %f (6 crypt operations)" % (time() - t))
print ("")
def _filetest_():
from time import time
f = open("pyDes.py", "rb+")
d = f.read()
f.close()
t = time()
k = des("MyDESKey")
d = k.encrypt(d, " ")
f = open("pyDes.py.enc", "wb+")
f.write(d)
f.close()
d = k.decrypt(d, " ")
f = open("pyDes.py.dec", "wb+")
f.write(d)
f.close()
print ("DES file test time: %f" % (time() - t))
def _profile_():
try:
import cProfile as profile
except:
import profile
profile.run('_fulltest_()')
#profile.run('_filetest_()')
def _fulltest_():
# This should not produce any unexpected errors or exceptions
from time import time
from binascii import unhexlify as unhex
from binascii import hexlify as dohex
t = time()
data = "DES encryption algorithm".encode('ascii')
k = des("\0\0\0\0\0\0\0\0", CBC, "\0\0\0\0\0\0\0\0")
d = k.encrypt(data)
if k.decrypt(d) != data:
print ("Test 1: Error: decrypt does not match. %r != %r" % (data, k.decrypt(d)))
else:
print ("Test 1: Successful")
data = "Default string of text".encode('ascii')
k = des("\0\0\0\0\0\0\0\0", CBC, "\0\0\0\0\0\0\0\0")
d = k.encrypt(data, "*")
if k.decrypt(d, "*") != data:
print ("Test 2: Error: decrypt does not match. %r != %r" % (data, k.decrypt(d)))
else:
print ("Test 2: Successful")
data = "String to Pad".encode('ascii')
k = des("\r\n\tABC\r\n")
d = k.encrypt(data, "*")
if k.decrypt(d, "*") != data:
print ("Test 3: Error: decrypt does not match. %r != %r" % (data, k.decrypt(d)))
else:
print ("Test 3: Successful")
k = des("\r\n\tABC\r\n")
d = k.encrypt(unhex("000102030405060708FF8FDCB04080"), unhex("44"))
if k.decrypt(d, unhex("44")) != unhex("000102030405060708FF8FDCB04080"):
print ("Test 4a: Error: Unencypted data block does not match start data")
    elif k.decrypt(d) != unhex("000102030405060708FF8FDCB0408044"):
print ("Test 4b: Error: Unencypted data block does not match start data")
else:
print ("Test 4: Successful")
data = "String to Pad".encode('ascii')
    k = des("\r\n\tkey\r\n")
d = k.encrypt(data, padmode=PAD_PKCS5)
if k.decrypt(d, padmode=PAD_PKCS5) != data:
print ("Test 5a: Error: decrypt does not match. %r != %r" % (data, k.decrypt(d)))
# Try same with padmode set on the class instance.
k = des("\r\n\tkey\r\n", padmode=PAD_PKCS5)
d = k.encrypt(data)
if k.decrypt(d) != data:
print ("Test 5b: Error: decrypt does not match. %r != %r" % (data, k.decrypt(d)))
else:
print ("Test 5: Successful")
k = triple_des("MyDesKey\r\n\tABC\r\n0987*543")
d = k.encrypt(unhex("000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080"))
if k.decrypt(d) != unhex("000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080"):
print ("Test 6: Error: Unencypted data block does not match start data")
else:
print ("Test 6: Successful")
k = triple_des("\r\n\tABC\r\n0987*543")
d = k.encrypt(unhex("000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080"))
if k.decrypt(d) != unhex("000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080"):
print ("Test 7: Error: Unencypted data block does not match start data")
else:
print ("Test 7: Successful")
k = triple_des("MyDesKey\r\n\tABC\r\n0987*54B", CBC, "12341234")
d = k.encrypt(unhex("000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080"))
if k.decrypt(d) != unhex("000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080"):
print ("Test 8: Error: Triple DES CBC failed.")
else:
print ("Test 8: Successful")
k = triple_des("MyDesKey\r\n\tABC\r\n0987*54B", CBC, "12341234")
d = k.encrypt(unhex("000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDC"), '.')
if k.decrypt(d, '.') != unhex("000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDCB04080000102030405060708FF8FDC"):
print ("Test 9: Error: Triple DES CBC with padding failed.")
else:
print ("Test 9: Successful")
k = triple_des("\r\n\tkey\rIsGoodKey")
data = "String to Pad".encode('ascii')
d = k.encrypt(data, padmode=PAD_PKCS5)
if k.decrypt(d, padmode=PAD_PKCS5) != data:
print ("Test 10: Error: decrypt does not match. %r != %r" % (data, k.decrypt(d)))
else:
print ("Test 10: Successful")
k = triple_des("\r\n\tkey\rIsGoodKey")
data = "String not need Padding.".encode('ascii')
d = k.encrypt(data, padmode=PAD_PKCS5)
if k.decrypt(d, padmode=PAD_PKCS5) != data:
print ("Test 11: Error: decrypt does not match. %r != %r" % (data, k.decrypt(d)))
else:
print ("Test 11: Successful")
# Test PAD_PKCS5 with CBC encryption mode.
k = des("IGoodKey", mode=CBC, IV="\0\1\2\3\4\5\6\7")
data = "String to Pad".encode('ascii')
d = k.encrypt(data, padmode=PAD_PKCS5)
if k.decrypt(d, padmode=PAD_PKCS5) != data:
print ("Test 12: Error: decrypt does not match. %r !=
|
burgerdev/volumina
|
volumina/colorama/ansitowin32.py
|
Python
|
lgpl-3.0
| 7,363
| 0.001901
|
###############################################################################
# volumina: volume slicing and editing library
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Lesser GNU General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# See the files LICENSE.lgpl2 and LICENSE.lgpl3 for full text of the
# GNU Lesser General Public License version 2.1 and 3 respectively.
# This information is also available on the ilastik web site at:
# http://ilastik.org/license/
###############################################################################
import re
import sys
from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style
from .winterm import WinTerm, WinColor, WinStyle
from .win32 import windll
if windll is not None:
winterm = WinTerm()
def is_a_tty(stream):
return hasattr(stream, 'isatty') and stream.isatty()
class StreamWrapper(object):
'''
Wraps a stream (such as stdout), acting as a transparent proxy for all
attribute access apart from method 'write()', which is delegated to our
Converter instance.
'''
def __init__(self, wrapped, converter):
# double-underscore everything to prevent clashes with names of
# attributes on the wrapped stream object.
self.__wrapped = wrapped
self.__convertor = converter
def __getattr__(self, name):
return getattr(self.__wrapped, name)
def write(self, text):
self.__convertor.write(text)
class AnsiToWin32(object):
'''
Implements a 'write()' method which, on Windows, will strip ANSI character
sequences from the text, and if outputting to a tty, will convert them into
win32 function calls.
'''
ANSI_RE = re.compile('\033\[((?:\d|;)*)([a-zA-Z])')
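    # Clarifying comment (not in the original source): the pattern matches ANSI
    # CSI sequences such as "\033[31m" or "\033[2;5H" -- ESC '[', an optional
    # run of digits and semicolons (the parameters), then one letter (the command).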
def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
# The wrapped stream (normally sys.stdout or sys.stderr)
self.wrapped = wrapped
# should we reset colors to defaults after every .write()
self.autoreset = autoreset
# create the proxy wrapping our output stream
self.stream = StreamWrapper(wrapped, self)
on_windows = sys.platform.startswith('win')
# should we strip ANSI sequences from our output?
if strip is None:
strip = on_windows
self.strip = strip
# should we should convert ANSI sequences into win32 calls?
if convert is None:
convert = on_windows and is_a_tty(wrapped)
self.convert = convert
# dict of ansi codes to win32 functions and parameters
self.win32_calls = self.get_win32_calls()
# are we wrapping stderr?
self.on_stderr = self.wrapped is sys.stderr
def should_wrap(self):
'''
True if this class is actually needed. If false, then the output
stream will not be affected, nor will win32 calls be issued, so
wrapping stdout is not actually required. This will generally be
False on non-Windows platforms, unless optional functionality like
autoreset has been requested using kwargs to init()
'''
return self.convert or self.strip or self.autoreset
def get_win32_calls(self):
if self.convert and winterm:
return {
AnsiStyle.RESET_ALL: (winterm.reset_all, ),
AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
AnsiFore.RED: (winterm.fore, WinColor.RED),
AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
AnsiFore.RESET: (winterm.fore, ),
AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
AnsiBack.RED: (winterm.back, WinColor.RED),
AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
AnsiBack.WHITE: (winterm.back, WinColor.GREY),
AnsiBack.RESET: (winterm.back, ),
}
def write(self, text):
if self.strip or self.convert:
self.write_and_convert(text)
else:
self.wrapped.write(text)
self.wrapped.flush()
if self.autoreset:
self.reset_all()
def reset_all(self):
if self.convert:
self.call_win32('m', (0,))
elif is_a_tty(self.wrapped):
self.wrapped.write(Style.RESET_ALL)
def write_and_convert(self, text):
'''
        Write the given text to our wrapped stream, stripping any ANSI
sequences from the text, and optionally converting them into win32
calls.
'''
cursor = 0
        for match in self.ANSI_RE.finditer(text):
start, end = match.span()
self.write_plain_text(text, cursor, start)
self.convert_ansi(*match.groups())
cursor = end
self.write_plain_text(text, cursor, len(text))
def write_plain_text(self, text, start, end):
if start < end:
self.wrapped.write(text[start:end])
self.wrapped.flush()
def convert_ansi(self, paramstring, command):
if self.convert:
params = self.extract_params(paramstring)
self.call_win32(command, params)
def extract_params(self, paramstring):
def split(paramstring):
for p in paramstring.split(';'):
if p != '':
yield int(p)
return tuple(split(paramstring))
def call_win32(self, command, params):
if params == []:
params = [0]
if command == 'm':
for param in params:
if param in self.win32_calls:
func_args = self.win32_calls[param]
func = func_args[0]
args = func_args[1:]
kwargs = dict(on_stderr=self.on_stderr)
func(*args, **kwargs)
elif command in ('H', 'f'): # set cursor position
func = winterm.set_cursor_position
func(params, on_stderr=self.on_stderr)
elif command in ('J'):
func = winterm.erase_data
func(params, on_stderr=self.on_stderr)
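# Minimal usage sketch (added for illustration; not part of the original
# module -- colorama's init() normally performs this wrapping automatically).
# It shows how AnsiToWin32 decides whether wrapping is needed and how the
# proxy stream is used; the sample text and escape codes are arbitrary.
if __name__ == '__main__':
    wrapper = AnsiToWin32(sys.stdout)
    stream = wrapper.stream if wrapper.should_wrap() else sys.stdout
    # "\033[31m" selects red, "\033[0m" resets; on a Windows tty these are
    # converted to win32 console calls, elsewhere they pass through or are
    # stripped, depending on the strip/convert flags.
    stream.write('\033[31m' + 'red text' + '\033[0m' + '\n')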
|
glaudsonml/kurgan-ai
|
tools/sqlmap/thirdparty/clientform/clientform.py
|
Python
|
apache-2.0
| 126,378
| 0.001369
|
"""HTML form handling for web clients.
ClientForm is a Python module for handling HTML forms on the client
side, useful for parsing HTML forms, filling them in and returning the
completed forms to the server. It has developed from a port of Gisle
Aas' Perl module HTML::Form, from the libwww-perl library, but the
interface is not the same.
The most useful docstring is the one for HTMLForm.
RFC 1866: HTML 2.0
RFC 1867: Form-based File Upload in HTML
RFC 2388: Returning Values from Forms: multipart/form-data
HTML 3.2 Specification, W3C Recommendation 14 January 1997 (for ISINDEX)
HTML 4.01 Specification, W3C Recommendation 24 December 1999
Copyright 2002-2007 John J. Lee <jjl@pobox.com>
Copyright 2005 Gary Poster
Copyright 2005 Zope Corporation
Copyright 1998-2000 Gisle Aas.
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
# XXX
# Remove parser testing hack
# safeUrl()-ize action
# Switch to unicode throughout (would be 0.3.x)
# See Wichert Akkerman's 2004-01-22 message to c.l.py.
# Add charset parameter to Content-type headers? How to find value??
# Add some more functional tests
# Especially single and multiple file upload on the internet.
# Does file upload work when name is missing? Sourceforge tracker form
# doesn't like it. Check standards, and test with Apache. Test
# binary upload with Apache.
# mailto submission & enctype text/plain
# I'm not going to fix this unless somebody tells me what real servers
# that want this encoding actually expect: If enctype is
# application/x-www-form-urlencoded and there's a FILE control present.
# Strictly, it should be 'name=data' (see HTML 4.01 spec., section
# 17.13.2), but I send "name=" ATM. What about multiple file upload??
# Would be nice, but I'm not going to do it myself:
# -------------------------------------------------
# Maybe a 0.4.x?
# Replace by_label etc. with moniker / selector concept. Allows, eg.,
# a choice between selection by value / id / label / element
# contents. Or choice between matching labels exactly or by
# substring. Etc.
# Remove deprecated methods.
# ...what else?
# Work on DOMForm.
# XForms? Don't know if there's a need here.
__all__ = ['AmbiguityError', 'CheckboxControl', 'Control',
'ControlNotFoundError', 'FileControl', 'FormParser', 'HTMLForm',
'HiddenControl', 'IgnoreControl', 'ImageControl', 'IsindexControl',
'Item', 'ItemCountError', 'ItemNotFoundError', 'Label',
'ListControl', 'LocateError', 'Missing', 'ParseError', 'ParseFile',
'ParseFileEx', 'ParseResponse', 'ParseResponseEx','PasswordControl',
'RadioControl', 'ScalarControl', 'SelectControl',
'SubmitButtonControl', 'SubmitControl', 'TextControl',
'TextareaControl', 'XHTMLCompatibleFormParser']
try: True
except NameError:
True = 1
False = 0
try: bool
except NameError:
def bool(expr):
if expr: return True
else: return False
try:
import logging
import inspect
except ImportError:
def debug(msg, *args, **kwds):
pass
else:
_logger = logging.getLogger("ClientForm")
OPTIMIZATION_HACK = True
def debug(msg, *args, **kwds):
if OPTIMIZATION_HACK:
return
caller_name = inspect.stack()[1][3]
extended_msg = '%%s %s' % msg
extended_args = (caller_name,)+args
debug = _logger.debug(extended_msg, *extended_args, **kwds)
def _show_debug_messages():
global OPTIMIZATION_HACK
OPTIMIZATION_HACK = False
_logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
_logger.addHandler(handler)
import sys, urllib, urllib2, types, mimetools, copy, urlparse, \
htmlentitydefs, re, random
from cStringIO import StringIO
import sgmllib
# monkeypatch to fix http://www.python.org/sf/803422 :-(
sgmllib.charref = re.compile("&#(x?[0-9a-fA-F]+)[^0-9a-fA-F]")
# HTMLParser.HTMLParser is recent, so live without it if it's not available
# (also, sgmllib.SGMLParser is much more tolerant of bad HTML)
try:
import HTMLParser
except ImportError:
HAVE_MODULE_HTMLPARSER = False
else:
HAVE_MODULE_HTMLPARSER = True
try:
import warnings
except ImportError:
def deprecation(message, stack_offset=0):
pass
else:
def deprecation(message, stack_offset=0):
warnings.warn(message, DeprecationWarning, stacklevel=3+stack_offset)
VERSION = "0.2.10"
CHUNK = 1024 # size of chunks fed to parser, in bytes
DEFAULT_ENCODING = "latin-1"
class Missing: pass
_compress_re = re.compile(r"\s+")
def compress_text(text): return _compress_re.sub(" ", text.strip())
def normalize_line_endings(text):
return re.sub(r"(?:(?<!\r)\n)|(?:\r(?!\n))", "\r\n", text)
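# Illustrative examples (added, not in the original source):
#   compress_text("  a\n b\tc  ")          -> "a b c"
#   normalize_line_endings("a\nb\rc\r\n")  -> "a\r\nb\r\nc\r\n"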
# This version of urlencode is from my Python 1.5.2 back-port of the
# Python 2.1 CVS maintenance branch of urllib. It will accept a sequence
# of pairs instead of a mapping -- the 2.0 version only accepts a mapping.
def urlencode(query,doseq=False,):
"""Encode a sequence of two-element tuples or dictionary into a URL query \
string.
If any values in the query arg are sequences and doseq is true, each
sequence element is converted to a separate parameter.
If the query arg is a sequence of two-element tuples, the order of the
parameters in the output will match the order of parameters in the
input.
"""
if hasattr(query,"items"):
# mapping objects
query = query.items()
else:
# it's a bother at times that strings and string-like objects are
# sequences...
try:
# non-sequence items should not work with len()
x = len(query)
# non-empty strings will fail this
if len(query) and type(query[0]) != types.TupleType:
raise TypeError()
# zero-length sequences of all types will get here and succeed,
# but that's a minor nit - since the original implementation
# allowed empty dicts that type of behavior probably should be
# preserved for consistency
except TypeError:
ty,va,tb = sys.exc_info()
raise TypeError("not a valid non-string sequence or mapping "
"object", tb)
l = []
if not doseq:
# preserve old behavior
for k, v in query:
k = urllib.quote_plus(str(k))
v = urllib.quote_plus(str(v))
l.append(k + '=' + v)
else:
for k, v in query:
k = urllib.quote_plus(str(k))
if type(v) == types.StringType:
v = urllib.quote_plus(v)
l.append(k + '=' + v)
elif type(v) == types.UnicodeType:
# is there a reasonable way to convert to ASCII?
# encode generates a string, but "replace" or "ignore"
# lose information and "strict" can raise UnicodeError
v = urllib.quote_plus(v.encode("ASCII","replace"))
l.append(k + '=' + v)
else:
try:
# is this a sufficient test for sequence-ness?
x = len(v)
except TypeError:
# not a sequence
v = urllib.quote_plus(str(v))
l.append(k + '=' + v)
else:
# loop over the sequence
for elt in v:
l.append(k + '=' + urllib.quote_plus(str(elt)))
return '&'.join(l)
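# Illustrative examples (added, not in the original source):
#   urlencode([("a", "b c"), ("x", ["1", "2"])], doseq=True)
#       -> "a=b+c&x=1&x=2"
#   urlencode({"a": "b c"})
#       -> "a=b+c"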
def unescape(data, entities, encoding=DEFAULT_ENCODING):
if data is None or "&" not in data:
return data
def replace_entities(match, entities=entities, encoding=encoding):
ent = match.group()
if ent[1] == "#":
return unescape_charref(ent[2:-1], encoding)
repl = entities.get(ent)
if repl is not None:
if type(repl) != type(""):
try:
repl = repl.e
|
cthGoman/shrdlite
|
cgi-bin/ajaxwrapper.py
|
Python
|
gpl-3.0
| 1,269
| 0.007092
|
#!/usr/bin/env python
from __future__ import print_function
import os
import cgi
from subprocess import Popen, PIPE, STDOUT
# Java
SCRIPTDIR = 'javaprolog'
# SCRIPT = ['/usr/bin/java', '-cp', 'json-simple-1.1.1.jar:gnuprologjava-0.2.6.jar:.', 'Shrdlite']
import platform
if platform.system()=='Windows':
SCRIPT = ['java', '-cp', 'json-simple-1.1.1.jar;gnuprologjava-0.2.6.jar;.', 'Shrdlite']
else:
SCRIPT = ['java', '-cp', 'json-simple-1.1.1.jar:gnuprologjava-0.2.6.jar:.', 'Shrdlite']
# # SWI Prolog
# SCRIPTDIR = 'javaprolog'
# SCRIPT = ['/usr/local/bin/swipl', '-q', '-g', 'main,halt', '-t', 'halt(1)', '-s', 'shrdlite.pl']
# # Haskell
# SCRIPTDIR = 'haskell'
# SCRIPT = ['/usr/bin/runhaskell', 'Shrdlite.hs']
# Python
# SCRIPTDIR = 'python'
# SCRIPT = ['/usr/bin/python', 'shrdlite.py']
while not os.path.isdir(SCRIPTDIR):
SCRIPTDIR = os.path.join("..", SCRIPTDIR)
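# (Clarifying comment, not in the original: the loop above keeps prefixing
#  "../" until the relative path resolves to an existing directory, so the CGI
#  script works regardless of the directory the web server runs it from.)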
print('Content-type:text/plain')
print()
try:
form = cgi.FieldStorage()
data = form.getfirst('data')
script = Popen(SCRIPT, cwd=SCRIPTDIR, stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = script.communicate(data)
print(out)
if err:
raise Exception(err)
except:
import sys, traceback
print(traceback.format_exc())
sys.exit(1)
|
DataDog/integrations-extras
|
calico/tests/test_e2e.py
|
Python
|
bsd-3-clause
| 492
| 0
|
# (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from datadog_checks.dev.utils import get_metadata_metrics
from . import common
@pytest.mark.e2e
def test_check_ok(dd_agent_check):
aggregator = dd_agent_check(rate=True)
metrics = common.FORMATTED_EXTRA_METRICS
for metric in metrics:
aggregator.assert_metric(metric)
aggregator.assert_metrics_using_metadata(get_metadata_metrics())
|
nlloyd/SubliminalCollaborator
|
libs/twisted/trial/test/test_deferred.py
|
Python
|
apache-2.0
| 8,003
| 0.00025
|
from twisted.internet import defer
from twisted.trial import unittest
from twisted.trial import runner, reporter, util
from twisted.trial.test import detests
class TestSetUp(unittest.TestCase):
def _loadSuite(self, klass):
loader = runner.TestLoader()
r = reporter.TestResult()
s = loader.loadClass(klass)
return r, s
def test_success(self):
result, suite = self._loadSuite(detests.DeferredSetUpOK)
suite(result)
self.failUnless(result.wasSuccessful())
self.assertEqual(result.testsRun, 1)
def test_fail(self):
self.failIf(detests.DeferredSetUpFail.testCalled)
result, suite = self._loadSuite(detests.DeferredSetUpFail)
suite(result)
self.failIf(result.wasSuccessful())
self.assertEqual(result.testsRun, 1)
self.assertEqual(len(result.failures), 0)
self.assertEqual(len(result.errors), 1)
self.failIf(detests.DeferredSetUpFail.testCalled)
def test_callbackFail(self):
self.failIf(detests.DeferredSetUpCallbackFail.testCalled)
result, suite = self._loadSuite(detests.DeferredSetUpCallbackFail)
suite(result)
self.failIf(result.wasSuccessful())
self.assertEqual(result.testsRun, 1)
self.assertEqual(len(result.failures), 0)
self.assertEqual(len(result.errors), 1)
self.failIf(detests.DeferredSetUpCallbackFail.testCalled)
def test_error(self):
self.failIf(detests.DeferredSetUpError.testCalled)
result, suite = self._loadSuite(detests.DeferredSetUpError)
suite(result)
self.failIf(result.wasSuccessful())
self.assertEqual(result.testsRun, 1)
self.assertEqual(len(result.failures), 0)
self.assertEqual(len(result.errors), 1)
self.failIf(detests.DeferredSetUpError.testCalled)
def test_skip(self):
self.failIf(detests.DeferredSetUpSkip.testCalled)
result, suite = self._loadSuite(detests.DeferredSetUpSkip)
suite(result)
self.failUnless(result.wasSuccessful())
self.assertEqual(result.testsRun, 1)
self.assertEqual(len(result.failures), 0)
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.skips), 1)
self.failIf(detests.DeferredSetUpSkip.testCalled)
class TestNeverFire(unittest.TestCase):
def setUp(self):
self._oldTimeout = util.DEFAULT_TIMEOUT_DURATION
util.DEFAULT_TIMEOUT_DURATION = 0.1
def tearDown(self):
util.DEFAULT_TIMEOUT_DURATION = self._oldTimeout
def _loadSuite(self, klass):
loader = runner.TestLoader()
r = reporter.TestResult()
        s = loader.loadClass(klass)
return r, s
def test_setUp(self):
self.failIf(detests.DeferredSetUpNeverFire.testCalled)
        result, suite = self._loadSuite(detests.DeferredSetUpNeverFire)
suite(result)
self.failIf(result.wasSuccessful())
self.assertEqual(result.testsRun, 1)
self.assertEqual(len(result.failures), 0)
self.assertEqual(len(result.errors), 1)
self.failIf(detests.DeferredSetUpNeverFire.testCalled)
self.failUnless(result.errors[0][1].check(defer.TimeoutError))
class TestTester(unittest.TestCase):
def getTest(self, name):
raise NotImplementedError("must override me")
def runTest(self, name):
result = reporter.TestResult()
self.getTest(name).run(result)
return result
class TestDeferred(TestTester):
def getTest(self, name):
return detests.DeferredTests(name)
def test_pass(self):
result = self.runTest('test_pass')
self.failUnless(result.wasSuccessful())
self.assertEqual(result.testsRun, 1)
def test_passGenerated(self):
result = self.runTest('test_passGenerated')
self.failUnless(result.wasSuccessful())
self.assertEqual(result.testsRun, 1)
self.failUnless(detests.DeferredTests.touched)
def test_fail(self):
result = self.runTest('test_fail')
self.failIf(result.wasSuccessful())
self.assertEqual(result.testsRun, 1)
self.assertEqual(len(result.failures), 1)
def test_failureInCallback(self):
result = self.runTest('test_failureInCallback')
self.failIf(result.wasSuccessful())
self.assertEqual(result.testsRun, 1)
self.assertEqual(len(result.failures), 1)
def test_errorInCallback(self):
result = self.runTest('test_errorInCallback')
self.failIf(result.wasSuccessful())
self.assertEqual(result.testsRun, 1)
self.assertEqual(len(result.errors), 1)
def test_skip(self):
result = self.runTest('test_skip')
self.failUnless(result.wasSuccessful())
self.assertEqual(result.testsRun, 1)
self.assertEqual(len(result.skips), 1)
self.failIf(detests.DeferredTests.touched)
def test_todo(self):
result = self.runTest('test_expectedFailure')
self.failUnless(result.wasSuccessful())
self.assertEqual(result.testsRun, 1)
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(len(result.expectedFailures), 1)
def test_thread(self):
result = self.runTest('test_thread')
self.assertEqual(result.testsRun, 1)
self.failUnless(result.wasSuccessful(), result.errors)
class TestTimeout(TestTester):
def getTest(self, name):
return detests.TimeoutTests(name)
def _wasTimeout(self, error):
self.assertEqual(error.check(defer.TimeoutError),
defer.TimeoutError)
def test_pass(self):
result = self.runTest('test_pass')
self.failUnless(result.wasSuccessful())
self.assertEqual(result.testsRun, 1)
def test_passDefault(self):
result = self.runTest('test_passDefault')
self.failUnless(result.wasSuccessful())
self.assertEqual(result.testsRun, 1)
def test_timeout(self):
result = self.runTest('test_timeout')
self.failIf(result.wasSuccessful())
self.assertEqual(result.testsRun, 1)
self.assertEqual(len(result.errors), 1)
self._wasTimeout(result.errors[0][1])
def test_timeoutZero(self):
result = self.runTest('test_timeoutZero')
self.failIf(result.wasSuccessful())
self.assertEqual(result.testsRun, 1)
self.assertEqual(len(result.errors), 1)
self._wasTimeout(result.errors[0][1])
def test_skip(self):
result = self.runTest('test_skip')
self.failUnless(result.wasSuccessful())
self.assertEqual(result.testsRun, 1)
self.assertEqual(len(result.skips), 1)
def test_todo(self):
result = self.runTest('test_expectedFailure')
self.failUnless(result.wasSuccessful())
self.assertEqual(result.testsRun, 1)
self.assertEqual(len(result.expectedFailures), 1)
self._wasTimeout(result.expectedFailures[0][1])
def test_errorPropagation(self):
result = self.runTest('test_errorPropagation')
self.failIf(result.wasSuccessful())
self.assertEqual(result.testsRun, 1)
self._wasTimeout(detests.TimeoutTests.timedOut)
def test_classTimeout(self):
loader = runner.TestLoader()
suite = loader.loadClass(detests.TestClassTimeoutAttribute)
result = reporter.TestResult()
suite.run(result)
self.assertEqual(len(result.errors), 1)
self._wasTimeout(result.errors[0][1])
def test_callbackReturnsNonCallingDeferred(self):
#hacky timeout
# raises KeyboardInterrupt because Trial sucks
from twisted.internet import reactor
call = reactor.callLater(2, reactor.crash)
result = self.runTest('test_calledButNeverCallback')
if call.active():
call.cancel()
self.failIf(result.wasSuccessful())
self._wasTimeout(result.errors[0][1])
|
unioslo/cerebrum
|
Cerebrum/modules/pwcheck/__init__.py
|
Python
|
gpl-2.0
| 1,022
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015-2016 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""The pwc
|
heck sub-package implements mixins for password checks.
Each module in this sub-package provides mixins
|
that can be used to check if a
password is strong enough to be accepted for use.
"""
|