| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable) |
|---|---|---|---|---|
zploskey/servo
|
refs/heads/master
|
components/script/dom/bindings/codegen/parser/tests/test_double_null.py
|
276
|
def WebIDLTest(parser, harness):
    threw = False
    try:
        parser.parse("""
            interface DoubleNull {
              attribute byte?? foo;
            };
        """)
        results = parser.finish()
    except:
        threw = True
    harness.ok(threw, "Should have thrown.")
|
leafclick/intellij-community
|
refs/heads/master
|
python/helpers/py2only/docutils/parsers/rst/languages/he.py
|
128
|
# Author: Meir Kriheli
# Id: $Id: he.py 7119 2011-09-02 13:00:23Z milde $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Hebrew-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
u'\u05ea\u05e9\u05d5\u05de\u05ea \u05dc\u05d1': 'attention',
u'\u05d6\u05d4\u05d9\u05e8\u05d5\u05ea': 'caution',
u'code (translation required)': 'code',
u'\u05e1\u05db\u05e0\u05d4': 'danger',
u'\u05e9\u05d2\u05d9\u05d0\u05d4' : 'error',
u'\u05e8\u05de\u05d6': 'hint',
u'\u05d7\u05e9\u05d5\u05d1': 'important',
u'\u05d4\u05e2\u05e8\u05d4': 'note',
u'\u05d8\u05d9\u05e4': 'tip',
u'\u05d0\u05d6\u05d4\u05e8\u05d4': 'warning',
'admonition': 'admonition',
'sidebar': 'sidebar',
'topic': 'topic',
'line-block': 'line-block',
'parsed-literal': 'parsed-literal',
'rubric': 'rubric',
'epigraph': 'epigraph',
'highlights': 'highlights',
'pull-quote': 'pull-quote',
'compound': 'compound',
'container': 'container',
#'questions': 'questions',
'table': 'table',
'csv-table': 'csv-table',
'list-table': 'list-table',
#'qa': 'questions',
#'faq': 'questions',
'meta': 'meta',
'math (translation required)': 'math',
#'imagemap': 'imagemap',
u'\u05ea\u05de\u05d5\u05e0\u05d4': 'image',
'figure': 'figure',
'include': 'include',
'raw': 'raw',
'replace': 'replace',
'unicode': 'unicode',
'date': 'date',
u'\u05e1\u05d2\u05e0\u05d5\u05df': 'class',
'role': 'role',
'default-role': 'default-role',
'title': 'title',
u'\u05ea\u05d5\u05db\u05df': 'contents',
'sectnum': 'sectnum',
'section-numbering': 'sectnum',
'header': 'header',
'footer': 'footer',
#'footnotes': 'footnotes',
#'citations': 'citations',
'target-notes': 'target-notes',
'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""English name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
# language-dependent: fixed
'abbreviation': 'abbreviation',
'ab': 'abbreviation',
'acronym': 'acronym',
'ac': 'acronym',
u'code (translation required)': 'code',
'index': 'index',
'i': 'index',
u'\u05ea\u05d7\u05ea\u05d9': 'subscript',
'sub': 'subscript',
u'\u05e2\u05d9\u05dc\u05d9': 'superscript',
'sup': 'superscript',
'title-reference': 'title-reference',
'title': 'title-reference',
't': 'title-reference',
'pep-reference': 'pep-reference',
'pep': 'pep-reference',
'rfc-reference': 'rfc-reference',
'rfc': 'rfc-reference',
'emphasis': 'emphasis',
'strong': 'strong',
'literal': 'literal',
'math (translation required)': 'math',
'named-reference': 'named-reference',
'anonymous-reference': 'anonymous-reference',
'footnote-reference': 'footnote-reference',
'citation-reference': 'citation-reference',
'substitution-reference': 'substitution-reference',
'target': 'target',
'uri-reference': 'uri-reference',
'uri': 'uri-reference',
'url': 'uri-reference',
'raw': 'raw',}
"""Mapping of English role names to canonical role names for interpreted text.
"""
|
StevenVanAcker/mitmproxy
|
refs/heads/master
|
examples/simple/modify_form.py
|
6
|
from mitmproxy import http


def request(flow: http.HTTPFlow) -> None:
    if flow.request.urlencoded_form:
        # If there's already a form, one can just add items to the dict:
        flow.request.urlencoded_form["mitmproxy"] = "rocks"
    else:
        # One can also just pass new form data.
        # This sets the proper content type and overrides the body.
        flow.request.urlencoded_form = [
            ("foo", "bar")
        ]
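
# To try this addon with a local mitmproxy install (assumed), load it with:
#   mitmproxy -s modify_form.py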
|
moble/sympy
|
refs/heads/master
|
examples/advanced/fem.py
|
107
|
#!/usr/bin/env python
"""FEM library
Demonstrates some simple finite element definitions, and computes a mass
matrix
$ python fem.py
[ 1/60, 0, -1/360, 0, -1/90, -1/360]
[ 0, 4/45, 0, 2/45, 2/45, -1/90]
[-1/360, 0, 1/60, -1/90, 0, -1/360]
[ 0, 2/45, -1/90, 4/45, 2/45, 0]
[ -1/90, 2/45, 0, 2/45, 4/45, 0]
[-1/360, -1/90, -1/360, 0, 0, 1/60]
"""
from sympy import symbols, Symbol, factorial, Rational, zeros, div, eye, \
    integrate, diff, pprint, reduced, binomial
x, y, z = symbols('x,y,z')
class ReferenceSimplex:
def __init__(self, nsd):
self.nsd = nsd
if nsd <= 3:
coords = symbols('x,y,z')[:nsd]
else:
coords = [Symbol("x_%d" % d) for d in range(nsd)]
self.coords = coords
def integrate(self, f):
coords = self.coords
nsd = self.nsd
limit = 1
for p in coords:
limit -= p
intf = f
for d in range(0, nsd):
p = coords[d]
limit += p
intf = integrate(intf, (p, 0, limit))
return intf
def bernstein_space(order, nsd):
if nsd > 3:
raise RuntimeError("Bernstein only implemented in 1D, 2D, and 3D")
sum = 0
basis = []
coeff = []
if nsd == 1:
b1, b2 = x, 1 - x
for o1 in range(0, order + 1):
for o2 in range(0, order + 1):
if o1 + o2 == order:
aij = Symbol("a_%d_%d" % (o1, o2))
sum += aij*binomial(order, o1)*pow(b1, o1)*pow(b2, o2)
basis.append(binomial(order, o1)*pow(b1, o1)*pow(b2, o2))
coeff.append(aij)
if nsd == 2:
b1, b2, b3 = x, y, 1 - x - y
for o1 in range(0, order + 1):
for o2 in range(0, order + 1):
for o3 in range(0, order + 1):
if o1 + o2 + o3 == order:
aij = Symbol("a_%d_%d_%d" % (o1, o2, o3))
fac = factorial(order) / (factorial(o1)*factorial(o2)*factorial(o3))
sum += aij*fac*pow(b1, o1)*pow(b2, o2)*pow(b3, o3)
basis.append(fac*pow(b1, o1)*pow(b2, o2)*pow(b3, o3))
coeff.append(aij)
if nsd == 3:
b1, b2, b3, b4 = x, y, z, 1 - x - y - z
for o1 in range(0, order + 1):
for o2 in range(0, order + 1):
for o3 in range(0, order + 1):
for o4 in range(0, order + 1):
if o1 + o2 + o3 + o4 == order:
aij = Symbol("a_%d_%d_%d_%d" % (o1, o2, o3, o4))
fac = factorial(order)/(factorial(o1)*factorial(o2)*factorial(o3)*factorial(o4))
sum += aij*fac*pow(b1, o1)*pow(b2, o2)*pow(b3, o3)*pow(b4, o4)
basis.append(fac*pow(b1, o1)*pow(b2, o2)*pow(b3, o3)*pow(b4, o4))
coeff.append(aij)
return sum, coeff, basis
def create_point_set(order, nsd):
h = Rational(1, order)
set = []
if nsd == 1:
for i in range(0, order + 1):
x = i*h
if x <= 1:
set.append((x, y))
if nsd == 2:
for i in range(0, order + 1):
x = i*h
for j in range(0, order + 1):
y = j*h
if x + y <= 1:
set.append((x, y))
if nsd == 3:
for i in range(0, order + 1):
x = i*h
for j in range(0, order + 1):
y = j*h
for k in range(0, order + 1):
z = k*h
if x + y + z <= 1:
set.append((x, y, z))
return set
def create_matrix(equations, coeffs):
A = zeros(len(equations))
i = 0
j = 0
for j in range(0, len(coeffs)):
c = coeffs[j]
for i in range(0, len(equations)):
e = equations[i]
d, _ = reduced(e, [c])
A[i, j] = d[0]
return A
class Lagrange:
def __init__(self, nsd, order):
self.nsd = nsd
self.order = order
self.compute_basis()
def nbf(self):
return len(self.N)
def compute_basis(self):
order = self.order
nsd = self.nsd
N = []
pol, coeffs, basis = bernstein_space(order, nsd)
points = create_point_set(order, nsd)
equations = []
for p in points:
ex = pol.subs(x, p[0])
if nsd > 1:
ex = ex.subs(y, p[1])
if nsd > 2:
ex = ex.subs(z, p[2])
equations.append(ex)
A = create_matrix(equations, coeffs)
Ainv = A.inv()
b = eye(len(equations))
xx = Ainv*b
for i in range(0, len(equations)):
Ni = pol
for j in range(0, len(coeffs)):
Ni = Ni.subs(coeffs[j], xx[j, i])
N.append(Ni)
self.N = N
def main():
t = ReferenceSimplex(2)
fe = Lagrange(2, 2)
u = 0
# compute u = sum_i u_i N_i
us = []
for i in range(0, fe.nbf()):
ui = Symbol("u_%d" % i)
us.append(ui)
u += ui*fe.N[i]
J = zeros(fe.nbf())
for i in range(0, fe.nbf()):
Fi = u*fe.N[i]
print(Fi)
for j in range(0, fe.nbf()):
uj = us[j]
integrands = diff(Fi, uj)
print(integrands)
J[j, i] = t.integrate(integrands)
pprint(J)
if __name__ == "__main__":
main()
|
luckylavish/zamboni
|
refs/heads/master
|
mkt/ratings/__init__.py
|
67
|
REVIEW_MODERATE_KEEP = -1
REVIEW_MODERATE_SKIP = 0
REVIEW_MODERATE_DELETE = 1
|
seanli9jan/tensorflow
|
refs/heads/master
|
tensorflow/contrib/optimizer_v2/optimizer_v2.py
|
2
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Version 2 of class Optimizer."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import distribution_strategy_context as distribute_ctx
from tensorflow.python.training import optimizer as optimizer_v1
from tensorflow.python.training import slot_creator
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util import nest
@six.add_metaclass(abc.ABCMeta)
class _OptimizableVariable(object):
"""Interface for abstracting over variables in the optimizers."""
@abc.abstractmethod
def target(self):
"""Returns the optimization target for this variable."""
raise NotImplementedError("Calling an abstract method.")
@abc.abstractmethod
def update_op(self, optimizer, g, *args):
"""Returns the update ops for updating the variable."""
raise NotImplementedError("Calling an abstract method.")
class _RefVariableProcessor(_OptimizableVariable):
"""Processor for Variable."""
def __init__(self, v):
self._v = v
def target(self):
return self._v._ref() # pylint: disable=protected-access
def update_op(self, optimizer, g, *args):
if isinstance(g, ops.Tensor):
update_op = optimizer._apply_dense(g, self._v, *args) # pylint: disable=protected-access
if self._v.constraint is not None:
with ops.control_dependencies([update_op]):
return self._v.assign(self._v.constraint(self._v))
else:
return update_op
else:
assert isinstance(g, ops.IndexedSlices), ("Gradient ", g, " is neither a "
"tensor nor IndexedSlices.")
if self._v.constraint is not None:
raise RuntimeError(
"Cannot use a constraint function on a sparse variable.")
# pylint: disable=protected-access
return optimizer._apply_sparse_duplicate_indices(g, self._v, *args)
class _DenseReadResourceVariableProcessor(_OptimizableVariable):
"""Processor for dense ResourceVariables."""
def __init__(self, v):
self._v = v
def target(self):
return self._v
def update_op(self, optimizer, g, *args):
# pylint: disable=protected-access
update_op = optimizer._resource_apply_dense(g, self._v.op.inputs[0], *args)
if self._v.constraint is not None:
with ops.control_dependencies([update_op]):
return self._v.assign(self._v.constraint(self._v))
else:
return update_op
class _DenseResourceVariableProcessor(_OptimizableVariable):
"""Processor for dense ResourceVariables."""
def __init__(self, v):
self._v = v
def target(self):
return self._v
def update_op(self, optimizer, g, *args):
# pylint: disable=protected-access
if isinstance(g, ops.IndexedSlices):
if self._v.constraint is not None:
raise RuntimeError(
"Cannot use a constraint function on a sparse variable.")
return optimizer._resource_apply_sparse_duplicate_indices(
g.values, self._v, g.indices, *args)
update_op = optimizer._resource_apply_dense(g, self._v, *args)
if self._v.constraint is not None:
with ops.control_dependencies([update_op]):
return self._v.assign(self._v.constraint(self._v))
else:
return update_op
class _TensorProcessor(_OptimizableVariable):
"""Processor for ordinary Tensors.
Even though a Tensor can't really be updated, sometimes it is useful to
compute the gradients with respect to a Tensor using the optimizer. Updating
the Tensor is, of course, unsupported.
"""
def __init__(self, v):
self._v = v
def target(self):
return self._v
def update_op(self, optimizer, g, *args):
raise NotImplementedError("Trying to update a Tensor ", self._v)
def _get_processor(v):
"""The processor of v."""
if context.executing_eagerly():
if isinstance(v, ops.Tensor):
return _TensorProcessor(v)
else:
return _DenseResourceVariableProcessor(v)
if v.op.type == "VarHandleOp":
return _DenseResourceVariableProcessor(v)
if isinstance(v, variables.Variable):
return _RefVariableProcessor(v)
if isinstance(v, ops.Tensor):
return _TensorProcessor(v)
raise NotImplementedError("Trying to optimize unsupported type ", v)
def _var_key_v2(var):
"""Key for representing a primary variable, for looking up slots."""
# pylint: disable=protected-access
if hasattr(var, "_distributed_container"):
distributed_container = var._distributed_container()
assert distributed_container is not None
if context.executing_eagerly():
return distributed_container._unique_id
return distributed_container._shared_name
if context.executing_eagerly():
return var._unique_id
return var.op.name
def _resolve(value, name):
if callable(value):
value = value()
return ops.convert_to_tensor(value, name=name)
def _is_dynamic(value):
"""Returns true if __init__ arg `value` should be re-evaluated each step."""
if callable(value):
return True
# Don't need to do anything special in graph mode, since dynamic values
# will propagate correctly automatically.
# TODO(josh11b): Add per-device caching across steps using variables for
# truly static values once we add distributed support.
if context.executing_eagerly() and isinstance(
value, resource_variable_ops.ResourceVariable):
return True
return False
class _OptimizerV2State(object):
"""Holds per-graph and per-step optimizer state.
Use _init_with_static_hyper() to create the state for a graph, and then
_copy_with_dynamic_hyper() to convert that to state for a particular step.
The difference between the two is that the former only has hyper
parameter values that are static and the latter also has values that
can change every step (according to _is_dynamic()).
"""
def __init__(self, op_name):
self._op_name = op_name
def _init_with_static_hyper(self, hyper):
"""Initialize a fresh state object from hyper dict."""
# self._hyper contains a dict from name to a dict with the Tensor values.
# This dict starts with a single item with key "None" with the hyper
# parameter value converted to a Tensor. Other items have dtype keys
# with that Tensor cast to that dtype.
with ops.init_scope():
self._hyper = {
name: {
None: ops.convert_to_tensor(value, name=name)
} for name, (dynamic, value) in sorted(hyper.items()) if not dynamic
}
self._slots = {}
self._non_slot_dict = {}
# Extra state to help Optimizers implement Checkpointable. Holds information
# about variables which will be restored as soon as they're created.
self._deferred_dependencies = {} # Non-slot variables
self._deferred_slot_restorations = {} # Slot variables
def _copy_with_dynamic_hyper(self, hyper, distribution, non_slot_devices):
"""Create a new state object for a particular step."""
ret = _OptimizerV2State(self._op_name)
# pylint: disable=protected-access
ret._slots = self._slots
ret._non_slot_dict = self._non_slot_dict
ret._deferred_dependencies = self._deferred_dependencies
ret._deferred_slot_restorations = self._deferred_slot_restorations
ret._hyper = {
name: {
None: _resolve(value, name)
} for name, (dynamic, value) in sorted(hyper.items()) if dynamic
}
ret._hyper.update(self._hyper)
ret._non_slot_devices = non_slot_devices
ret._distribution = distribution
return ret
def _variables(self):
"""Returns a list of all variables held by self."""
optimizer_variables = list(self._non_slot_dict.values())
for variable_dict in self._slots.values():
for slot_for_variable in variable_dict.values():
optimizer_variables.append(slot_for_variable)
# Sort variables by name so that the return is deterministic.
return sorted(optimizer_variables, key=lambda v: v.name)
def _slot_dict(self, slot_name):
"""Returns a dict for caching slots created under the given name.
Args:
slot_name: Name for the slot.
Returns:
A dict that maps primary `Variable` objects to the slot created
for that variable, under the given slot name.
"""
named_slots = self._slots.get(slot_name, None)
if named_slots is None:
named_slots = {}
self._slots[slot_name] = named_slots
return named_slots
def create_slot(self, var, val, slot_name, optional_op_name=None):
"""Find or create a slot for a variable.
Args:
var: A `Variable` object.
val: A `Tensor`. The initial value of the slot.
slot_name: Name for the slot.
optional_op_name: Name to use when scoping the Variable that needs to be
created for the slot.
Returns:
A `Variable` object.
"""
named_slots = self._slot_dict(slot_name)
var_key = _var_key_v2(var)
if var_key not in named_slots:
new_slot_variable = slot_creator.create_slot(
var, val, optional_op_name or self._op_name)
self._restore_slot_variable(
slot_name=slot_name, variable=var, slot_variable=new_slot_variable)
named_slots[var_key] = new_slot_variable
return named_slots[var_key]
def create_slot_with_initializer(self,
var,
initializer,
shape,
dtype,
slot_name,
optional_op_name=None):
"""Find or create a slot for a variable, using an Initializer.
Args:
var: A `Variable` object.
initializer: An `Initializer`. The initial value of the slot.
shape: Shape of the initial value of the slot.
dtype: Type of the value of the slot.
slot_name: Name for the slot.
optional_op_name: Name to use when scoping the Variable that needs to be
created for the slot.
Returns:
A `Variable` object.
"""
named_slots = self._slot_dict(slot_name)
var_key = _var_key_v2(var)
if var_key not in named_slots:
new_slot_variable = slot_creator.create_slot_with_initializer(
var, initializer, shape, dtype, optional_op_name or self._op_name)
self._restore_slot_variable(
slot_name=slot_name, variable=var, slot_variable=new_slot_variable)
named_slots[var_key] = new_slot_variable
return named_slots[var_key]
def zeros_slot(self, var, slot_name, optional_op_name=None):
"""Find or create a slot initialized with 0.0.
Args:
var: A `Variable` object.
slot_name: Name for the slot.
optional_op_name: Name to use when scoping the Variable that needs to be
created for the slot.
Returns:
A `Variable` object.
"""
named_slots = self._slot_dict(slot_name)
var_key = _var_key_v2(var)
if var_key not in named_slots:
new_slot_variable = slot_creator.create_zeros_slot(
var, optional_op_name or self._op_name)
self._restore_slot_variable(
slot_name=slot_name, variable=var, slot_variable=new_slot_variable)
named_slots[var_key] = new_slot_variable
return named_slots[var_key]
def _create_or_restore_slot_variable(self,
slot_variable_position,
slot_name,
variable,
optional_op_name=None):
"""Restore a slot variable's value, possibly creating it.
Called when a variable which has an associated slot variable is created or
restored. When executing eagerly, we create the slot variable with a
restoring initializer.
No new variables are created when graph building. Instead,
_restore_slot_variable catches these after normal creation and adds restore
ops to the graph. This method is nonetheless important when graph building
for the case when a slot variable has already been created but `variable`
has just been added to a dependency graph (causing us to realize that the
slot variable needs to be restored).
Args:
slot_variable_position: A `checkpointable._CheckpointPosition` object
indicating the slot variable `Checkpointable` object to be restored.
slot_name: The name of this `Optimizer`'s slot to restore into.
variable: The variable object this slot is being created for.
optional_op_name: Name to use when scoping the Variable that needs to be
created for the slot.
"""
slot_variable = self.get_slot(var=variable, name=slot_name)
if (slot_variable is None and context.executing_eagerly() and
slot_variable_position.is_simple_variable()
# Defer slot variable creation if there is an active variable creator
# scope. Generally we'd like to eagerly create/restore slot variables
# when possible, but this may mean that scopes intended to catch
# `variable` also catch its eagerly created slot variable
# unintentionally (specifically make_template would add a dependency on
# a slot variable if not for this case). Deferring is mostly harmless
# (aside from double initialization), and makes variable creator scopes
# behave the same way they do when graph building.
and not ops.get_default_graph()._variable_creator_stack): # pylint: disable=protected-access
initializer = checkpointable.CheckpointInitialValue(
checkpoint_position=slot_variable_position)
slot_variable = self.create_slot(
var=variable,
val=initializer,
slot_name=slot_name,
optional_op_name=optional_op_name)
# Optimizers do not have unconditional dependencies on their slot
# variables (nor do any other objects). They are only saved if the
# variables they were created for are also saved.
if slot_variable is not None:
# If we've either made this slot variable, or if we've pulled out an
# existing slot variable, we should restore it.
slot_variable_position.restore(slot_variable)
else:
# We didn't make the slot variable. Defer restoring until it gets created
# normally. We keep a list rather than the one with the highest restore
# UID in case slot variables have their own dependencies, in which case
# those could differ between restores.
variable_key = _var_key_v2(variable)
self._deferred_slot_restorations.setdefault(slot_name, {}).setdefault(
variable_key, []).append(slot_variable_position)
def get_slot(self, var, name):
"""Return a slot named `name` created for `var` by the Optimizer.
Some `Optimizer` subclasses use additional variables. For example
`Momentum` and `Adagrad` use variables to accumulate updates. This method
gives access to these `Variable` objects if for some reason you need them.
Use `get_slot_names()` to get the list of slot names created by the
`Optimizer`.
Args:
var: A variable passed to `minimize()` or `apply_gradients()`.
name: A string.
Returns:
The `Variable` for the slot if it was created, `None` otherwise.
"""
named_slots = self._slots.get(name, None)
if not named_slots:
return None
return named_slots.get(_var_key_v2(var), None)
def get_slot_names(self):
"""Return a list of the names of slots created by the `Optimizer`.
See `get_slot()`.
Returns:
A list of strings.
"""
return sorted(self._slots.keys())
def create_non_slot(self, initial_value, name, colocate_with=None):
"""Add an extra variable, not associated with a slot."""
v = self._non_slot_dict.get(name, None)
if v is None:
if colocate_with is None:
colocate_with = self._non_slot_devices
with self._distribution.colocate_vars_with(colocate_with):
# TODO(josh11b): Use get_variable() except for the legacy Adam use case.
v = variable_scope.variable(initial_value, name=name, trainable=False)
self._non_slot_dict[name] = v
deferred_dependencies_list = self._deferred_dependencies.pop(name, ())
for checkpoint_position in sorted(
deferred_dependencies_list,
key=lambda restore: restore.checkpoint.restore_uid,
reverse=True):
checkpoint_position.restore(v)
return v
def _restore_slot_variable(self, slot_name, variable, slot_variable):
"""Restore a newly created slot variable's value."""
variable_key = _var_key_v2(variable)
deferred_restorations = self._deferred_slot_restorations.get(
slot_name, {}).pop(variable_key, [])
# Iterate over restores, highest restore UID first to minimize the number
# of assignments.
deferred_restorations.sort(
key=lambda position: position.restore_uid, reverse=True)
for checkpoint_position in deferred_restorations:
checkpoint_position.restore(slot_variable)
def get_non_slot(self, name):
"""Returns the non-slot variable identified by `name`."""
return self._non_slot_dict.get(name, None)
def get_hyper(self, name, dtype=None):
"""Returns the `name` hyper parameter, optionally cast to `dtype`."""
dtype_dict = self._hyper[name]
# Do we have the value cast to dtype already cached? This should always
# succeed when dtype is None.
if dtype in dtype_dict:
return dtype_dict[dtype]
# Not cached, cast to dtype and save the result in the cache.
result = math_ops.cast(dtype_dict[None], dtype)
dtype_dict[dtype] = result
return result
class OptimizerV2(optimizer_v1.Optimizer):
"""Updated base class for optimizers.
This class defines the API to add Ops to train a model. You never use this
class directly, but instead instantiate one of its subclasses such as
`GradientDescentOptimizer`, `AdagradOptimizer`, or `MomentumOptimizer`.
### Usage
```python
# Create an optimizer with the desired parameters.
opt = GradientDescentOptimizer(learning_rate=0.1)
# Add Ops to the graph to minimize a cost by updating a list of variables.
# "cost" is a Tensor, and the list of variables contains tf.Variable
# objects.
opt_op = opt.minimize(cost, var_list=<list of variables>)
```
In the training program you will just have to run the returned Op.
```python
# Execute opt_op to do one step of training:
opt_op.run()
```
### Processing gradients before applying them.
Calling `minimize()` takes care of both computing the gradients and
applying them to the variables. If you want to process the gradients
before applying them you can instead use the optimizer in three steps:
1. Compute the gradients with `compute_gradients()`.
2. Process the gradients as you wish.
3. Apply the processed gradients with `apply_gradients()`.
Example:
```python
# Create an optimizer.
opt = GradientDescentOptimizer(learning_rate=0.1)
# Compute the gradients for a list of variables.
grads_and_vars = opt.compute_gradients(loss, <list of variables>)
# grads_and_vars is a list of tuples (gradient, variable). Do whatever you
# need to the 'gradient' part, for example cap them, etc.
capped_grads_and_vars = [(MyCapper(gv[0]), gv[1]) for gv in grads_and_vars]
# Ask the optimizer to apply the capped gradients.
opt.apply_gradients(capped_grads_and_vars)
```
### Gating Gradients
Both `minimize()` and `compute_gradients()` accept a `gate_gradients`
argument that controls the degree of parallelism during the application of
the gradients.
The possible values are: `GATE_NONE`, `GATE_OP`, and `GATE_GRAPH`.
<b>`GATE_NONE`</b>: Compute and apply gradients in parallel. This provides
the maximum parallelism in execution, at the cost of some non-reproducibility
in the results. For example, the two gradients of `matmul` depend on the input
values: with `GATE_NONE` one of the gradients could be applied to one of the
inputs _before_ the other gradient is computed, resulting in non-reproducible
results.
<b>`GATE_OP`</b>: For each Op, make sure all gradients are computed before
they are used. This prevents race conditions for Ops that generate gradients
for multiple inputs where the gradients depend on the inputs.
<b>`GATE_GRAPH`</b>: Make sure all gradients for all variables are computed
before any one of them is used. This provides the least parallelism but can
be useful if you want to process all gradients before applying any of them.
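For example, a minimal sketch (the `loss` tensor and variable list are
placeholders, as in the examples above):
```python
# Compute every gradient before applying any of them.
opt = GradientDescentOptimizer(learning_rate=0.1)
grads_and_vars = opt.compute_gradients(
    loss, <list of variables>, gate_gradients=GradientDescentOptimizer.GATE_GRAPH)
opt.apply_gradients(grads_and_vars)
```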
### Slots
Some optimizer subclasses, such as `MomentumOptimizer` and `AdagradOptimizer`,
allocate and manage additional variables associated with the variables to
train. These are called <i>Slots</i>. Slots have names and you can ask the
optimizer for the names of the slots that it uses. Once you have a slot name
you can ask the optimizer for the variable it created to hold the slot value.
This can be useful if you want to log or debug a training algorithm, report
stats about the slots, etc.
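For example, a minimal sketch (`cost` and `var` are placeholders):
```python
opt = MomentumOptimizer(learning_rate=0.1, momentum=0.9)
opt_op = opt.minimize(cost, var_list=[var])
# List the slot names, then fetch the accumulator kept for `var`.
print(opt.get_slot_names())            # e.g. ['momentum']
momentum_slot = opt.get_slot(var, 'momentum')
```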
### Non-slot variables
Some optimizer subclasses, such as `AdamOptimizer`, have variables that
are not associated with the variables to train, just the step itself.
### Hyper parameters
These are arguments passed to the optimizer subclass constructor
(the `__init__` method), and then passed to `self._set_hyper()`.
They can be either regular Python values (like 1.0), tensors, or
callables. If they are callable, the callable will be called during
`apply_gradients()` to get the value for the hyper parameter.
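For example, a minimal sketch (`decayed_learning_rate` is a placeholder
callable defined elsewhere):
```python
# Because a callable is passed, the rate is re-read on every apply_gradients().
opt = GradientDescentOptimizer(learning_rate=decayed_learning_rate)
```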
### State
Internal methods are passed a `state` argument with the correct
values to use for the slot and non-slot variables, and the hyper
parameters.
"""
# Values for gate_gradients.
GATE_NONE = 0
GATE_OP = 1
GATE_GRAPH = 2
def __init__(self, use_locking, name):
"""Create a new Optimizer.
This must be called by the constructors of subclasses.
Note that Optimizer instances should not bind to a single graph,
and so shouldn't keep Tensors as member variables. Generally
you should be able to use the _set_hyper()/state.get_hyper()
facility instead.
Args:
use_locking: Bool. If True, use locks to prevent concurrent updates
to variables.
name: A non-empty string. The name to use for accumulators created
for the optimizer.
Raises:
ValueError: If name is malformed.
RuntimeError: If _create_slots has been overridden instead of
_create_vars.
"""
# Note: We intentionally don't call parent __init__.
# Optimizer._create_slots was replaced by _create_vars in OptimizerV2.
if (self.__class__._create_slots.__code__ is not # pylint: disable=protected-access
OptimizerV2._create_slots.__code__):
raise RuntimeError(
"Override _create_vars instead of _create_slots when "
"descending from OptimizerV2 (class %s)" % self.__class__.__name__)
if not name:
raise ValueError("Must specify the optimizer name")
self._use_locking = use_locking
self._name = name
# Map from graph_key to state for that graph. We use the graph_key
# since it works in both eager and graph mode, and gives the outer
# graph inside functions.
replica_context = distribute_ctx.get_replica_context()
if replica_context is None:
# In a cross-replica context for a DistributionStrategy, which means
# only one Optimizer will be created, not one per replica.
self._per_graph_state = {}
else:
# We use get_replica_context().merge_call() to get a single dict
# shared across all model replicas when running with a
# DistributionStrategy.
self._per_graph_state = replica_context.merge_call(lambda _: {})
# Hyper parameters, and whether they should be re-evaluated every step.
self._hyper = {}
def _set_hyper(self, name, value):
self._hyper[name] = (_is_dynamic(value), value)
def minimize(self,
loss,
global_step=None,
var_list=None,
gate_gradients=GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
name=None,
grad_loss=None,
stop_gradients=None,
scale_loss_by_num_replicas=None):
"""Add operations to minimize `loss` by updating `var_list`.
This method simply combines calls to `compute_gradients()` and
`apply_gradients()`. If you want to process the gradients before applying
them, call `compute_gradients()` and `apply_gradients()` explicitly instead
of using this function.
Args:
loss: A `Tensor` containing the value to minimize.
global_step: Optional `Variable` to increment by one after the variables
have been updated.
var_list: Optional list or tuple of `Variable` objects to update to
minimize `loss`. Defaults to the list of variables collected in the
graph under the key `GraphKeys.TRAINABLE_VARIABLES`.
gate_gradients: How to gate the computation of gradients. Can be
`GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: If True, try colocating gradients with the
corresponding op.
name: Optional name for the returned operation.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
stop_gradients: Optional. A Tensor or list of tensors not to differentiate
through.
scale_loss_by_num_replicas: Optional boolean. If true, scale the loss down
by the number of replicas. By default, auto-detects whether this is
needed.
Returns:
An Operation that updates the variables in `var_list`. If `global_step`
was not `None`, that operation also increments `global_step`.
Raises:
ValueError: If some of the variables are not `Variable` objects.
@compatibility(eager)
When eager execution is enabled, `loss` should be a Python function that
takes elements of `var_list` as arguments and computes the value to be
minimized. If `var_list` is None, `loss` should take no arguments.
Minimization (and gradient computation) is done with respect to the
elements of `var_list` if not None, else with respect to any trainable
variables created during the execution of the `loss` function.
`gate_gradients`, `aggregation_method`, `colocate_gradients_with_ops` and
`grad_loss` are ignored when eager execution is enabled.
@end_compatibility
"""
grads_and_vars = self.compute_gradients(
loss,
var_list=var_list,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss,
stop_gradients=stop_gradients,
scale_loss_by_num_replicas=scale_loss_by_num_replicas)
vars_with_grad = [v for g, v in grads_and_vars if g is not None]
if not vars_with_grad:
raise ValueError(
"No gradients provided for any variable, check your graph for ops"
" that do not support gradients, between variables %s and loss %s." %
([str(v) for _, v in grads_and_vars], loss))
return self.apply_gradients(
grads_and_vars, global_step=global_step, name=name)
def compute_gradients(self,
loss,
var_list=None,
gate_gradients=GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
grad_loss=None,
stop_gradients=None,
scale_loss_by_num_replicas=None):
"""Compute gradients of `loss` for the variables in `var_list`.
This is the first part of `minimize()`. It returns a list
of (gradient, variable) pairs where "gradient" is the gradient
for "variable". Note that "gradient" can be a `Tensor`, an
`IndexedSlices`, or `None` if there is no gradient for the
given variable.
Args:
loss: A Tensor containing the value to minimize or a callable taking no
arguments which returns the value to minimize. When eager execution is
enabled it must be a callable.
var_list: Optional list or tuple of `tf.Variable` to update to minimize
`loss`. Defaults to the list of variables collected in the graph under
the key `GraphKeys.TRAINABLE_VARIABLES`.
gate_gradients: How to gate the computation of gradients. Can be
`GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: If True, try colocating gradients with the
corresponding op.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
stop_gradients: Optional. A Tensor or list of tensors not to differentiate
through.
scale_loss_by_num_replicas: Optional boolean. If true, scale the loss down
by the number of replicas. By default, auto-detects whether this is
needed.
Returns:
A list of (gradient, variable) pairs. Variable is always present, but
gradient can be `None`.
Raises:
TypeError: If `var_list` contains anything other than `Variable` objects.
ValueError: If some arguments are invalid.
RuntimeError: If called with eager execution enabled and `loss` is
not callable.
@compatibility(eager)
When eager execution is enabled, `gate_gradients`, `aggregation_method`,
and `colocate_gradients_with_ops` are ignored.
@end_compatibility
"""
# TODO(josh11b): Test that we handle weight decay in a reasonable way.
if callable(loss):
with backprop.GradientTape() as tape:
if var_list is not None:
tape.watch(var_list)
loss_value = loss()
# Scale loss for number of replicas (callable-loss case). In this case,
# we have to be careful to call distribute_lib.get_loss_reduction()
# *after* loss() is evaluated, so we know what loss reduction it uses.
if scale_loss_by_num_replicas is None:
scale_loss_by_num_replicas = (
distribute_lib.get_loss_reduction() == variable_scope
.VariableAggregation.MEAN)
if scale_loss_by_num_replicas:
num_replicas = distribute_ctx.get_distribution_strategy().num_replicas
if num_replicas > 1:
loss_value *= 1. / num_replicas
if var_list is None:
var_list = tape.watched_variables()
grads = tape.gradient(loss_value, var_list, grad_loss)
return list(zip(grads, var_list))
if context.executing_eagerly():
raise RuntimeError("`loss` passed to Optimizer.compute_gradients should "
"be a function when eager execution is enabled.")
# Scale loss for number of replicas (non-callable-loss case).
if scale_loss_by_num_replicas is None:
scale_loss_by_num_replicas = (
distribute_lib.get_loss_reduction() == variable_scope
.VariableAggregation.MEAN)
if scale_loss_by_num_replicas:
num_replicas = distribute_ctx.get_distribution_strategy().num_replicas
if num_replicas > 1:
loss *= 1. / num_replicas
if gate_gradients not in [
optimizer_v1.Optimizer.GATE_NONE, optimizer_v1.Optimizer.GATE_OP,
optimizer_v1.Optimizer.GATE_GRAPH
]:
raise ValueError(
"gate_gradients must be one of: Optimizer.GATE_NONE, "
"Optimizer.GATE_OP, Optimizer.GATE_GRAPH. Not %s" % gate_gradients)
self._assert_valid_dtypes([loss])
if grad_loss is not None:
self._assert_valid_dtypes([grad_loss])
if var_list is None:
var_list = (
variables.trainable_variables() + ops.get_collection(
ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
else:
var_list = nest.flatten(var_list)
# pylint: disable=protected-access
var_list += ops.get_collection(ops.GraphKeys._STREAMING_MODEL_PORTS)
# pylint: enable=protected-access
processors = [_get_processor(v) for v in var_list]
if not var_list:
raise ValueError("No variables to optimize.")
var_refs = [p.target() for p in processors]
grads = gradients.gradients(
loss,
var_refs,
grad_ys=grad_loss,
gate_gradients=(gate_gradients == optimizer_v1.Optimizer.GATE_OP),
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
stop_gradients=stop_gradients)
if gate_gradients == optimizer_v1.Optimizer.GATE_GRAPH:
grads = control_flow_ops.tuple(grads)
grads_and_vars = list(zip(grads, var_list))
self._assert_valid_dtypes([
v for g, v in grads_and_vars
if g is not None and v.dtype != dtypes.resource
])
return grads_and_vars
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients to variables.
This is the second part of `minimize()`. It returns an `Operation` that
applies gradients.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
`compute_gradients()`.
global_step: Optional `Variable` to increment by one after the variables
have been updated.
name: Optional name for the returned operation. Defaults to the name
passed to the `Optimizer` constructor.
Returns:
An `Operation` that applies the specified gradients. If `global_step`
was not None, that operation also increments `global_step`.
Raises:
TypeError: If `grads_and_vars` is malformed.
ValueError: If none of the variables have gradients.
"""
# This is a default implementation of apply_gradients() that can be shared
# by most optimizers. It relies on the subclass implementing the following
# methods: _create_vars(), _prepare(), _apply_dense(), and _apply_sparse().
# Filter out variables with gradients of `None`.
grads_and_vars = tuple(grads_and_vars) # Make sure repeat iteration works.
if not grads_and_vars:
raise ValueError("No variables provided.")
filtered = tuple((g, v) for (g, v) in grads_and_vars if g is not None)
if not filtered:
raise ValueError("No gradients provided for any variable: %s." %
([str(v) for _, v in grads_and_vars],))
return distribute_ctx.get_replica_context().merge_call(
self._distributed_apply, filtered, global_step=global_step, name=name)
def _get_or_create_state(self, var_list=None):
"""Either looks up or creates `_OptimizerV2State`.
If any variables are available, they should be passed via the `var_list`
argument, and these will be used to determine the graph to create/retrieve
state for. Otherwise the returned state is for the current default graph.
Args:
var_list: A list of variables to extract a graph from.
Returns:
An `_OptimizerV2State` object.
"""
# Determine the graph_key from the current graph.
eager_execution = context.executing_eagerly()
if eager_execution or var_list is None:
graph = ops.get_default_graph()
else:
graph = ops._get_graph_from_inputs(var_list) # pylint: disable=protected-access
assert graph is not None
graph_key = graph._graph_key # pylint: disable=protected-access
# Get the per graph state by looking up the graph_key.
if graph_key in self._per_graph_state:
per_graph_state = self._per_graph_state[graph_key]
else:
per_graph_state = _OptimizerV2State(self._name)
per_graph_state._init_with_static_hyper(self._hyper) # pylint: disable=protected-access
self._per_graph_state[graph_key] = per_graph_state
return per_graph_state
def _distributed_apply(self, distribution, grads_and_vars, global_step, name):
"""`apply_gradients` for use with a `DistributionStrategy`."""
reduced_grads = distribution.batch_reduce(
variable_scope.VariableAggregation.SUM, grads_and_vars)
var_list = [v for _, v in grads_and_vars]
grads_and_vars = zip(reduced_grads, var_list)
unwrapped_var_list = [x for v in var_list for x in distribution.unwrap(v)]
eager_execution = context.executing_eagerly()
if eager_execution:
# Give a clear error in this case instead of "name not supported
# for Eager Tensors" when we compute non_slot_devices.
for v in unwrapped_var_list:
if isinstance(v, ops.Tensor):
raise NotImplementedError("Trying to update a Tensor ", v)
with ops.name_scope(name, self._name) as name:
per_graph_state = self._get_or_create_state(var_list=unwrapped_var_list)
# Include the current value of any dynamic hyper parameters in `state`.
non_slot_devices = distribution.non_slot_devices(var_list)
state = per_graph_state._copy_with_dynamic_hyper( # pylint: disable=protected-access
self._hyper, distribution, non_slot_devices)
# Create any slot and non-slot variables we need in `state`.
with ops.init_scope():
self._create_vars(var_list, state)
with ops.name_scope(name): # Re-enter name_scope created above
# Give the child class a chance to do something before we start
# applying gradients.
self._prepare(state)
def update(v, g):
"""Update variable `v` using gradient `g`."""
assert v is not None
# Convert the grad to Tensor or IndexedSlices if necessary, and
# look up a processor for each variable's type.
try:
g = ops.convert_to_tensor_or_indexed_slices(g)
except TypeError:
raise TypeError("Gradient must be convertible to a Tensor"
" or IndexedSlices, or None: %s" % g)
if not isinstance(g, (ops.Tensor, ops.IndexedSlices)):
raise TypeError(
"Gradient must be a Tensor, IndexedSlices, or None: %s" % g)
processor = _get_processor(v)
# We colocate all ops created in _apply_dense or _apply_sparse
# on the same device as the variable.
# TODO(apassos): figure out how to get the variable name here.
scope_name = "" if eager_execution else v.op.name
# device_policy is set because non-mirrored tensors will be read in
# `update_op`.
# TODO(josh11b): Make different state objects for each device to
# avoid needing to set the device_policy.
device_policy = context.context().device_policy(
context.DEVICE_PLACEMENT_SILENT)
with ops.name_scope("update_" + scope_name), device_policy:
return processor.update_op(self, g, state)
# Use the processors to update the variables.
update_ops = []
for grad, var in grads_and_vars:
update_ops.extend(distribution.update(var, update, grad, grouped=False))
# Give the child class a chance to do something after applying
# gradients
def finish():
# TODO(josh11b): Make different state objects for each device to
# avoid needing to set the device_policy.
with context.context().device_policy(context.DEVICE_PLACEMENT_SILENT):
return self._finish(state)
update_ops = control_flow_ops.group(update_ops)
with ops.control_dependencies([update_ops]):
finish_updates = distribution.update_non_slot(
non_slot_devices, finish, grouped=False)
# We said grouped=False, which means finish_updates is always a list.
# It will be [None] when finish() returns None.
if finish_updates == [None]:
finish_updates = [update_ops]
# Update `global_step` (if any).
if global_step is None:
apply_updates = distribution.group(finish_updates, name=name)
else:
with ops.control_dependencies(finish_updates):
def update_global_step(global_step, name):
return global_step.assign_add(1, read_value=False, name=name)
apply_updates = distribution.update(global_step, update_global_step,
name)
# Add the training op to the TRAIN_OP graph collection in graph mode.
if not eager_execution:
if isinstance(apply_updates, ops.Tensor):
apply_updates = apply_updates.op
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
if apply_updates not in train_op:
train_op.append(apply_updates)
return apply_updates
def get_slot(self, var, name):
"""Return a slot named `name` created for `var` by the Optimizer.
Some `Optimizer` subclasses use additional variables. For example
`Momentum` and `Adagrad` use variables to accumulate updates. This method
gives access to these `Variable` objects if for some reason you need them.
Use `get_slot_names()` to get the list of slot names created by the
`Optimizer`.
Args:
var: A variable passed to `minimize()` or `apply_gradients()`.
name: A string.
Returns:
The `Variable` for the slot if it was created, `None` otherwise.
"""
state = self._get_state_for_var(var)
return state.get_slot(var, name) if state is not None else None
def get_slot_names(self):
"""Return a list of the names of slots created by the `Optimizer`.
See `get_slot()`.
Returns:
A list of strings.
"""
state = self._get_per_graph_state()
return state.get_slot_names() if state is not None else []
def variables(self):
"""A list of variables which encode the current state of `Optimizer`.
Includes slot variables and additional global variables created by the
optimizer in the current default graph.
Returns:
A list of variables.
"""
state = self._get_per_graph_state()
return state._variables() if state is not None else [] # pylint: disable=protected-access
# --------------
# Methods to be implemented by subclasses if they want to use the
# inherited implementation of apply_gradients() or compute_gradients().
# --------------
def _create_vars(self, var_list, state):
"""Create all slots needed by the variables and any non-slot variables.
Args:
var_list: A list of `Variable` objects.
state: An object with these methods: `create_slot(var, val, slot_name,
optional_op_name)`, `create_slot_with_initializer(` `var, initializer,
shape, dtype, slot_name, optional_op_name)`, `zeros_slot(var, slot_name,
optional_op_name)`, `create_non_slot_variable(initial_value, name,
colocate_with)`, `get_hyper(name)`
"""
# No slots needed by default
pass
def _prepare(self, state):
"""Code to execute before applying gradients.
Note that most uses of _prepare() in Optimizer have been subsumed
by explicit support for hyper parameters in OptimizerV2.
Args:
state: An object with a `get_hyper(name)` method.
Returns:
Return value will be ignored.
"""
pass
def _apply_dense(self, grad, var, state):
"""Add ops to apply dense gradients to `var`.
Args:
grad: A `Tensor`.
var: A `Variable` object.
state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,
and `get_hyper(name)` methods.
Returns:
An `Operation`.
"""
raise NotImplementedError()
def _resource_apply_dense(self, grad, handle, state):
"""Add ops to apply dense gradients to the variable `handle`.
Args:
grad: a `Tensor` representing the gradient.
handle: a `Tensor` of dtype `resource` which points to the variable to be
updated.
state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,
and `get_hyper(name)` methods.
Returns:
An `Operation` which updates the value of the variable.
"""
raise NotImplementedError()
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices,
state):
"""Add ops to apply sparse gradients to `handle`, with repeated indices.
Optimizers which override this method must deal with repeated indices. See
the docstring of `_apply_sparse_duplicate_indices` for details. By default
the correct behavior, to sum non-unique indices and their associated
gradients, is enforced by first pre-processing `grad` and `indices` and
passing them on to `_resource_apply_sparse`. Optimizers which deal correctly
with duplicate indices may instead override this method to avoid the
overhead of summing.
Args:
grad: a `Tensor` representing the gradient for the affected indices.
handle: a `Tensor` of dtype `resource` which points to the variable to be
updated.
indices: a `Tensor` of integral type representing the indices for which
the gradient is nonzero. Indices may be repeated.
state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,
and `get_hyper(name)` methods.
Returns:
An `Operation` which updates the value of the variable.
"""
# pylint: disable=protected-access
summed_grad, unique_indices = optimizer_v1._deduplicate_indexed_slices(
values=grad, indices=indices)
# pylint: enable=protected-access
return self._resource_apply_sparse(summed_grad, handle, unique_indices,
state)
def _resource_apply_sparse(self, grad, handle, indices, state):
"""Add ops to apply sparse gradients to the variable `handle`.
Similar to `_apply_sparse`, the `indices` argument to this method has been
de-duplicated. Optimizers which deal correctly with non-unique indices may
instead override `_resource_apply_sparse_duplicate_indices` to avoid this
overhead.
Args:
grad: a `Tensor` representing the gradient for the affected indices.
handle: a `Tensor` of dtype `resource` which points to the variable to be
updated.
indices: a `Tensor` of integral type representing the indices for which
the gradient is nonzero. Indices are unique.
state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,
and `get_hyper(name)` methods.
Returns:
An `Operation` which updates the value of the variable.
"""
raise NotImplementedError()
def _apply_sparse_duplicate_indices(self, grad, var, state):
"""Add ops to apply sparse gradients to `var`, with repeated sparse indices.
Optimizers which override this method must deal with IndexedSlices objects
such as the following:
IndexedSlicesValue(values=[1, 1], indices=[0, 0], dense_shape=[1])
The correct interpretation is:
IndexedSlicesValue(values=[2], indices=[0], dense_shape=[1])
Many optimizers deal incorrectly with repeated indices when updating based
on sparse gradients (e.g. summing squares rather than squaring the sum, or
applying momentum terms multiple times). Adding first is always the correct
behavior, so this is enforced here by reconstructing the IndexedSlices to
have only unique indices, then calling _apply_sparse.
Optimizers which deal correctly with repeated indices may instead override
this method to avoid the overhead of summing indices.
Args:
grad: `IndexedSlices`.
var: A `Variable` object.
state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,
and `get_hyper(name)` methods.
Returns:
An `Operation`.
"""
# pylint: disable=protected-access
summed_values, unique_indices = optimizer_v1._deduplicate_indexed_slices(
values=grad.values, indices=grad.indices)
# pylint: enable=protected-access
gradient_no_duplicate_indices = ops.IndexedSlices(
indices=unique_indices,
values=summed_values,
dense_shape=grad.dense_shape)
return self._apply_sparse(gradient_no_duplicate_indices, var, state)
def _apply_sparse(self, grad, var, state):
"""Add ops to apply sparse gradients to `var`.
The IndexedSlices object passed to `grad` in this function is by default
pre-processed in `_apply_sparse_duplicate_indices` to remove duplicate
indices (see its docstring for details). Optimizers which can tolerate or
have correct special cases for duplicate sparse indices may override
`_apply_sparse_duplicate_indices` instead of this function, avoiding that
overhead.
Args:
grad: `IndexedSlices`, with no repeated indices.
var: A `Variable` object.
state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,
and `get_hyper(name)` methods.
Returns:
An `Operation`.
"""
raise NotImplementedError()
def _finish(self, state):
"""Do what is needed to finish the update.
This is called inside a scope colocated with any non-slot variables.
Args:
state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,
and `get_hyper(name)` methods.
Returns:
The operation to apply updates, or None if no updates.
"""
return None
# --------------
# Utility methods for subclasses.
# --------------
def _get_per_graph_state(self):
# pylint: disable=protected-access
return self._per_graph_state.get(ops.get_default_graph()._graph_key, None)
def _get_state_for_var(self, var):
# pylint: disable=protected-access
return self._per_graph_state.get(var._graph_key, None)
# --------------
# Overridden methods from Checkpointable.
# --------------
def _track_checkpointable(self, *args, **kwargs):
"""Optimizers may not track dependencies. Raises an error."""
raise NotImplementedError(
"Optimizers may not have dependencies. File a feature request if this "
"limitation bothers you.")
@property
def _checkpoint_dependencies(self):
"""From Checkpointable. Gather graph-specific non-slot variables to save."""
current_graph_non_slot_variables = []
state = self._get_per_graph_state()
if state is not None:
for name, variable_object in sorted(
state._non_slot_dict.items(), # pylint: disable=protected-access
# Avoid comparing variables
key=lambda item: item[0]):
current_graph_non_slot_variables.append(
checkpointable.CheckpointableReference(
name=name, ref=variable_object))
# Note: ignores super(); Optimizers may not have any dependencies outside of
# state objects.
return current_graph_non_slot_variables
def _lookup_dependency(self, name):
"""From Checkpointable. Find a non-slot variable in the current graph."""
state = self._get_per_graph_state()
if state is None:
return None
else:
return state.get_non_slot(name)
@property
def _deferred_dependencies(self):
"""Lets Checkpointable know where non-slot variables are created.
If necessary, creates a new state object for the current default graph.
Checkpointable will then add entries to that state's deferred dependency
dictionary. The state object will check that dictionary when creating
non-slot variables, restoring their value if an entry is found.
Returns:
A dictionary which holds deferred dependencies for the current default
graph.
"""
state = self._get_or_create_state()
return state._deferred_dependencies # pylint: disable=protected-access
def _create_or_restore_slot_variable(self, slot_variable_position, slot_name,
variable):
"""Checkpointable: Restore a slot variable's value, possibly creating it.
Called when a variable which has an associated slot variable is created or
restored.
Args:
slot_variable_position: A `checkpointable._CheckpointPosition` object
indicating the slot variable `Checkpointable` object to be restored.
slot_name: The name of this `Optimizer`'s slot to restore into.
variable: The variable object this slot is being created for.
"""
state = self._get_or_create_state(var_list=[variable])
state._create_or_restore_slot_variable( # pylint: disable=protected-access
slot_variable_position=slot_variable_position,
slot_name=slot_name,
variable=variable,
optional_op_name=self._name)
# --------------
# Unsupported parent methods
# --------------
def _slot_dict(self, slot_name):
raise NotImplementedError("_slot_dict() method unsupported in OptimizerV2")
def _get_or_make_slot(self, var, val, slot_name, op_name):
raise NotImplementedError(
"_get_or_make_slot() method unsupported in OptimizerV2")
def _get_or_make_slot_with_initializer(self, var, initializer, shape, dtype,
slot_name, op_name):
raise NotImplementedError(
"_get_or_make_slot_with_initializer() method unsupported in "
"OptimizerV2")
def _create_non_slot_variable(self, initial_value, name, colocate_with):
raise NotImplementedError(
"_create_non_slot_variable() method unsupported in OptimizerV2")
def _get_non_slot_variable(self, name, graph=None):
raise NotImplementedError(
"_get_non_slot_variable() method unsupported in OptimizerV2")
def _non_slot_variables(self):
raise NotImplementedError(
"_non_slot_variables() method unsupported in OptimizerV2")
|
yanheven/nova
|
refs/heads/master
|
nova/tests/unit/api/openstack/compute/contrib/test_security_groups.py
|
9
|
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mox3 import mox
from oslo_config import cfg
from oslo_serialization import jsonutils
import webob
from nova.api.openstack.compute.contrib import security_groups as secgroups_v2
from nova.api.openstack.compute.plugins.v3 import security_groups as \
secgroups_v21
from nova import compute
from nova.compute import power_state
from nova import context as context_maker
import nova.db
from nova import exception
from nova import objects
from nova.objects import instance as instance_obj
from nova import quota
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
CONF = cfg.CONF
FAKE_UUID1 = 'a47ae74e-ab08-447f-8eee-ffd43fc46c16'
FAKE_UUID2 = 'c6e6430a-6563-4efa-9542-5e93c9e97d18'
class AttrDict(dict):
def __getattr__(self, k):
return self[k]
def security_group_template(**kwargs):
sg = kwargs.copy()
sg.setdefault('tenant_id', '123')
sg.setdefault('name', 'test')
sg.setdefault('description', 'test-description')
return sg
def security_group_db(security_group, id=None):
attrs = security_group.copy()
if 'tenant_id' in attrs:
attrs['project_id'] = attrs.pop('tenant_id')
if id is not None:
attrs['id'] = id
attrs.setdefault('rules', [])
attrs.setdefault('instances', [])
return AttrDict(attrs)
def security_group_rule_template(**kwargs):
rule = kwargs.copy()
rule.setdefault('ip_protocol', 'tcp')
rule.setdefault('from_port', 22)
rule.setdefault('to_port', 22)
rule.setdefault('parent_group_id', 2)
return rule
def security_group_rule_db(rule, id=None):
attrs = rule.copy()
if 'ip_protocol' in attrs:
attrs['protocol'] = attrs.pop('ip_protocol')
return AttrDict(attrs)
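# Note on the helpers above (illustrative, not used by the tests): with the
# defaults, security_group_template() returns
#   {'tenant_id': '123', 'name': 'test', 'description': 'test-description'}
# and security_group_db() renames 'tenant_id' to 'project_id' and adds empty
# 'rules' and 'instances' lists, wrapped in an AttrDict so callers can use
# attribute access (e.g. sg.project_id) as well as item access.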
def return_server(context, server_id,
columns_to_join=None, use_slave=False):
return fake_instance.fake_db_instance(
**{'id': int(server_id),
'power_state': 0x01,
'host': "localhost",
'uuid': FAKE_UUID1,
'name': 'asdf'})
def return_server_by_uuid(context, server_uuid,
columns_to_join=None,
use_slave=False):
return fake_instance.fake_db_instance(
**{'id': 1,
'power_state': 0x01,
'host': "localhost",
'uuid': server_uuid,
'name': 'asdf'})
def return_non_running_server(context, server_id, columns_to_join=None):
return fake_instance.fake_db_instance(
**{'id': server_id, 'power_state': power_state.SHUTDOWN,
'uuid': FAKE_UUID1, 'host': "localhost", 'name': 'asdf'})
def return_security_group_by_name(context, project_id, group_name):
return {'id': 1, 'name': group_name,
"instances": [{'id': 1, 'uuid': FAKE_UUID1}]}
def return_security_group_without_instances(context, project_id, group_name):
return {'id': 1, 'name': group_name}
def return_server_nonexistent(context, server_id, columns_to_join=None):
raise exception.InstanceNotFound(instance_id=server_id)
class TestSecurityGroupsV21(test.TestCase):
secgrp_ctl_cls = secgroups_v21.SecurityGroupController
server_secgrp_ctl_cls = secgroups_v21.ServerSecurityGroupController
secgrp_act_ctl_cls = secgroups_v21.SecurityGroupActionController
def setUp(self):
super(TestSecurityGroupsV21, self).setUp()
self.controller = self.secgrp_ctl_cls()
self.server_controller = self.server_secgrp_ctl_cls()
self.manager = self.secgrp_act_ctl_cls()
# This needs to be done here to set fake_id because the derived
# class needs to be called first if it wants to set
# 'security_group_api' and this setUp method needs to be called.
if self.controller.security_group_api.id_is_uuid:
self.fake_id = '11111111-1111-1111-1111-111111111111'
else:
self.fake_id = '11111111'
def _assert_no_security_groups_reserved(self, context):
"""Check that no reservations are leaked during tests."""
result = quota.QUOTAS.get_project_quotas(context, context.project_id)
self.assertEqual(result['security_groups']['reserved'], 0)
def _assert_security_groups_in_use(self, project_id, user_id, in_use):
context = context_maker.get_admin_context()
result = quota.QUOTAS.get_user_quotas(context, project_id, user_id)
self.assertEqual(result['security_groups']['in_use'], in_use)
def test_create_security_group(self):
sg = security_group_template()
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
res_dict = self.controller.create(req, {'security_group': sg})
self.assertEqual(res_dict['security_group']['name'], 'test')
self.assertEqual(res_dict['security_group']['description'],
'test-description')
def test_create_security_group_with_no_name(self):
sg = security_group_template()
del sg['name']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, sg)
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_no_description(self):
sg = security_group_template()
del sg['description']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_empty_description(self):
sg = security_group_template()
sg['description'] = ""
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
try:
self.controller.create(req, {'security_group': sg})
self.fail('Should have raised BadRequest exception')
except webob.exc.HTTPBadRequest as exc:
self.assertEqual('description has a minimum character requirement'
' of 1.', exc.explanation)
except exception.InvalidInput as exc:
            self.fail(
                'Should have raised BadRequest exception, not InvalidInput')
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_blank_name(self):
sg = security_group_template(name='')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_whitespace_name(self):
sg = security_group_template(name=' ')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_blank_description(self):
sg = security_group_template(description='')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_whitespace_description(self):
sg = security_group_template(description=' ')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_duplicate_name(self):
sg = security_group_template()
# FIXME: Stub out _get instead of creating twice
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.controller.create(req, {'security_group': sg})
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_no_body(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, None)
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_no_security_group(self):
body = {'no-securityGroup': None}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body)
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_above_255_characters_name(self):
sg = security_group_template(name='1234567890' * 26)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_above_255_characters_description(self):
sg = security_group_template(description='1234567890' * 26)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_non_string_name(self):
sg = security_group_template(name=12)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_non_string_description(self):
sg = security_group_template(description=12)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_quota_limit(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
for num in range(1, CONF.quota_security_groups):
name = 'test%s' % num
sg = security_group_template(name=name)
res_dict = self.controller.create(req, {'security_group': sg})
self.assertEqual(res_dict['security_group']['name'], name)
sg = security_group_template()
self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
req, {'security_group': sg})
def test_get_security_group_list(self):
groups = []
for i, name in enumerate(['default', 'test']):
sg = security_group_template(id=i + 1,
name=name,
description=name + '-desc',
rules=[])
groups.append(sg)
expected = {'security_groups': groups}
def return_security_groups(context, project_id):
return [security_group_db(sg) for sg in groups]
self.stubs.Set(nova.db, 'security_group_get_by_project',
return_security_groups)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
res_dict = self.controller.index(req)
self.assertEqual(res_dict, expected)
def test_get_security_group_list_missing_group_id_rule(self):
groups = []
rule1 = security_group_rule_template(cidr='10.2.3.124/24',
parent_group_id=1,
group_id={}, id=88,
protocol='TCP')
rule2 = security_group_rule_template(cidr='10.2.3.125/24',
parent_group_id=1,
id=99, protocol=88,
group_id='HAS_BEEN_DELETED')
sg = security_group_template(id=1,
name='test',
description='test-desc',
rules=[rule1, rule2])
groups.append(sg)
# An expected rule here needs to be created as the api returns
# different attributes on the rule for a response than what was
# passed in. For example:
# "cidr": "0.0.0.0/0" ->"ip_range": {"cidr": "0.0.0.0/0"}
expected_rule = security_group_rule_template(
ip_range={'cidr': '10.2.3.124/24'}, parent_group_id=1,
group={}, id=88, ip_protocol='TCP')
expected = security_group_template(id=1,
name='test',
description='test-desc',
rules=[expected_rule])
expected = {'security_groups': [expected]}
def return_security_groups(context, project, search_opts):
return [security_group_db(sg) for sg in groups]
self.stubs.Set(self.controller.security_group_api, 'list',
return_security_groups)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
res_dict = self.controller.index(req)
self.assertEqual(res_dict, expected)
def test_get_security_group_list_all_tenants(self):
all_groups = []
tenant_groups = []
for i, name in enumerate(['default', 'test']):
sg = security_group_template(id=i + 1,
name=name,
description=name + '-desc',
rules=[])
all_groups.append(sg)
if name == 'default':
tenant_groups.append(sg)
all = {'security_groups': all_groups}
tenant_specific = {'security_groups': tenant_groups}
def return_all_security_groups(context):
return [security_group_db(sg) for sg in all_groups]
self.stubs.Set(nova.db, 'security_group_get_all',
return_all_security_groups)
def return_tenant_security_groups(context, project_id):
return [security_group_db(sg) for sg in tenant_groups]
self.stubs.Set(nova.db, 'security_group_get_by_project',
return_tenant_security_groups)
path = '/v2/fake/os-security-groups'
req = fakes.HTTPRequest.blank(path, use_admin_context=True)
res_dict = self.controller.index(req)
self.assertEqual(res_dict, tenant_specific)
req = fakes.HTTPRequest.blank('%s?all_tenants=1' % path,
use_admin_context=True)
res_dict = self.controller.index(req)
self.assertEqual(res_dict, all)
def test_get_security_group_by_instance(self):
groups = []
for i, name in enumerate(['default', 'test']):
sg = security_group_template(id=i + 1,
name=name,
description=name + '-desc',
rules=[])
groups.append(sg)
expected = {'security_groups': groups}
def return_instance(context, server_id,
columns_to_join=None, use_slave=False):
self.assertEqual(server_id, FAKE_UUID1)
return return_server_by_uuid(context, server_id)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_instance)
def return_security_groups(context, instance_uuid):
self.assertEqual(instance_uuid, FAKE_UUID1)
return [security_group_db(sg) for sg in groups]
self.stubs.Set(nova.db, 'security_group_get_by_instance',
return_security_groups)
req = fakes.HTTPRequest.blank('/v2/%s/servers/%s/os-security-groups' %
('fake', FAKE_UUID1))
res_dict = self.server_controller.index(req, FAKE_UUID1)
self.assertEqual(res_dict, expected)
@mock.patch('nova.db.instance_get_by_uuid')
@mock.patch('nova.db.security_group_get_by_instance', return_value=[])
def test_get_security_group_empty_for_instance(self, mock_sec_group,
mock_db_get_ins):
expected = {'security_groups': []}
def return_instance(context, server_id,
columns_to_join=None, use_slave=False):
self.assertEqual(server_id, FAKE_UUID1)
return return_server_by_uuid(context, server_id)
mock_db_get_ins.side_effect = return_instance
req = fakes.HTTPRequest.blank('/v2/%s/servers/%s/os-security-groups' %
('fake', FAKE_UUID1))
res_dict = self.server_controller.index(req, FAKE_UUID1)
self.assertEqual(expected, res_dict)
mock_sec_group.assert_called_once_with(req.environ['nova.context'],
FAKE_UUID1)
def test_get_security_group_by_instance_non_existing(self):
self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_nonexistent)
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/os-security-groups')
self.assertRaises(webob.exc.HTTPNotFound,
self.server_controller.index, req, '1')
def test_get_security_group_by_instance_invalid_id(self):
req = fakes.HTTPRequest.blank(
'/v2/fake/servers/invalid/os-security-groups')
self.assertRaises(webob.exc.HTTPNotFound,
self.server_controller.index, req, 'invalid')
def test_get_security_group_by_id(self):
sg = security_group_template(id=2, rules=[])
def return_security_group(context, group_id):
self.assertEqual(sg['id'], group_id)
return security_group_db(sg)
self.stubs.Set(nova.db, 'security_group_get',
return_security_group)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2')
res_dict = self.controller.show(req, '2')
expected = {'security_group': sg}
self.assertEqual(res_dict, expected)
def test_get_security_group_by_invalid_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/invalid')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, 'invalid')
def test_get_security_group_by_non_existing_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s' %
self.fake_id)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, self.fake_id)
def test_update_security_group(self):
sg = security_group_template(id=2, rules=[])
sg_update = security_group_template(id=2, rules=[],
name='update_name', description='update_desc')
def return_security_group(context, group_id):
self.assertEqual(sg['id'], group_id)
return security_group_db(sg)
def return_update_security_group(context, group_id, values,
columns_to_join=None):
self.assertEqual(sg_update['id'], group_id)
self.assertEqual(sg_update['name'], values['name'])
self.assertEqual(sg_update['description'], values['description'])
return security_group_db(sg_update)
self.stubs.Set(nova.db, 'security_group_update',
return_update_security_group)
self.stubs.Set(nova.db, 'security_group_get',
return_security_group)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2')
res_dict = self.controller.update(req, '2',
{'security_group': sg_update})
expected = {'security_group': sg_update}
self.assertEqual(res_dict, expected)
def test_update_security_group_name_to_default(self):
sg = security_group_template(id=2, rules=[], name='default')
def return_security_group(context, group_id):
self.assertEqual(sg['id'], group_id)
return security_group_db(sg)
self.stubs.Set(nova.db, 'security_group_get',
return_security_group)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, '2', {'security_group': sg})
def test_update_default_security_group_fail(self):
sg = security_group_template()
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/1')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, '1', {'security_group': sg})
def test_delete_security_group_by_id(self):
sg = security_group_template(id=1, project_id='fake_project',
user_id='fake_user', rules=[])
self.called = False
def security_group_destroy(context, id):
self.called = True
def return_security_group(context, group_id):
self.assertEqual(sg['id'], group_id)
return security_group_db(sg)
self.stubs.Set(nova.db, 'security_group_destroy',
security_group_destroy)
self.stubs.Set(nova.db, 'security_group_get',
return_security_group)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/1')
self.controller.delete(req, '1')
self.assertTrue(self.called)
def test_delete_security_group_by_admin(self):
sg = security_group_template(id=2, rules=[])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.controller.create(req, {'security_group': sg})
context = req.environ['nova.context']
# Ensure quota usage for security group is correct.
self._assert_security_groups_in_use(context.project_id,
context.user_id, 2)
# Delete the security group by admin.
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2',
use_admin_context=True)
self.controller.delete(req, '2')
# Ensure quota for security group in use is released.
self._assert_security_groups_in_use(context.project_id,
context.user_id, 1)
def test_delete_security_group_by_invalid_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/invalid')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, 'invalid')
def test_delete_security_group_by_non_existing_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
% self.fake_id)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, self.fake_id)
def test_delete_security_group_in_use(self):
sg = security_group_template(id=1, rules=[])
def security_group_in_use(context, id):
return True
def return_security_group(context, group_id):
self.assertEqual(sg['id'], group_id)
return security_group_db(sg)
self.stubs.Set(nova.db, 'security_group_in_use',
security_group_in_use)
self.stubs.Set(nova.db, 'security_group_get',
return_security_group)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/1')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, '1')
def test_associate_by_non_existing_security_group_name(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
self.assertEqual(return_server(None, '1'),
nova.db.instance_get(None, '1'))
body = dict(addSecurityGroup=dict(name='non-existing'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._addSecurityGroup, req, '1', body)
def test_associate_by_invalid_server_id(self):
body = dict(addSecurityGroup=dict(name='test'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/invalid/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._addSecurityGroup, req, 'invalid', body)
def test_associate_without_body(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(addSecurityGroup=None)
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup, req, '1', body)
def test_associate_no_security_group_name(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(addSecurityGroup=dict())
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup, req, '1', body)
def test_associate_security_group_name_with_whitespaces(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(addSecurityGroup=dict(name=" "))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup, req, '1', body)
def test_associate_non_existing_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_nonexistent)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._addSecurityGroup, req, '1', body)
def test_associate_non_running_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_non_running_server)
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_without_instances)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.manager._addSecurityGroup(req, '1', body)
def test_associate_already_associated_security_group_to_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_by_uuid)
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_by_name)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup, req, '1', body)
def test_associate(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_by_uuid)
self.mox.StubOutWithMock(nova.db, 'instance_add_security_group')
nova.db.instance_add_security_group(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_without_instances)
self.mox.ReplayAll()
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.manager._addSecurityGroup(req, '1', body)
def test_disassociate_by_non_existing_security_group_name(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
self.assertEqual(return_server(None, '1'),
nova.db.instance_get(None, '1'))
body = dict(removeSecurityGroup=dict(name='non-existing'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._removeSecurityGroup, req, '1', body)
def test_disassociate_by_invalid_server_id(self):
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_by_name)
body = dict(removeSecurityGroup=dict(name='test'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/invalid/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._removeSecurityGroup, req, 'invalid',
body)
def test_disassociate_without_body(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(removeSecurityGroup=None)
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._removeSecurityGroup, req, '1', body)
def test_disassociate_no_security_group_name(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(removeSecurityGroup=dict())
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._removeSecurityGroup, req, '1', body)
def test_disassociate_security_group_name_with_whitespaces(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(removeSecurityGroup=dict(name=" "))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._removeSecurityGroup, req, '1', body)
def test_disassociate_non_existing_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_by_name)
body = dict(removeSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._removeSecurityGroup, req, '1', body)
def test_disassociate_non_running_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_non_running_server)
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_by_name)
body = dict(removeSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.manager._removeSecurityGroup(req, '1', body)
def test_disassociate_already_associated_security_group_to_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_by_uuid)
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_without_instances)
body = dict(removeSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._removeSecurityGroup, req, '1', body)
def test_disassociate(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_by_uuid)
self.mox.StubOutWithMock(nova.db, 'instance_remove_security_group')
nova.db.instance_remove_security_group(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_by_name)
self.mox.ReplayAll()
body = dict(removeSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.manager._removeSecurityGroup(req, '1', body)
class TestSecurityGroupsV2(TestSecurityGroupsV21):
secgrp_ctl_cls = secgroups_v2.SecurityGroupController
server_secgrp_ctl_cls = secgroups_v2.ServerSecurityGroupController
secgrp_act_ctl_cls = secgroups_v2.SecurityGroupActionController
class TestSecurityGroupRulesV21(test.TestCase):
secgrp_ctl_cls = secgroups_v21.SecurityGroupRulesController
def setUp(self):
super(TestSecurityGroupRulesV21, self).setUp()
self.controller = self.secgrp_ctl_cls()
if self.controller.security_group_api.id_is_uuid:
id1 = '11111111-1111-1111-1111-111111111111'
id2 = '22222222-2222-2222-2222-222222222222'
self.invalid_id = '33333333-3333-3333-3333-333333333333'
else:
id1 = 1
id2 = 2
self.invalid_id = '33333333'
self.sg1 = security_group_template(id=id1)
self.sg2 = security_group_template(
id=id2, name='authorize_revoke',
description='authorize-revoke testing')
db1 = security_group_db(self.sg1)
db2 = security_group_db(self.sg2)
def return_security_group(context, group_id, columns_to_join=None):
if group_id == db1['id']:
return db1
if group_id == db2['id']:
return db2
raise exception.SecurityGroupNotFound(security_group_id=group_id)
self.stubs.Set(nova.db, 'security_group_get',
return_security_group)
self.parent_security_group = db2
def test_create_by_cidr(self):
rule = security_group_rule_template(cidr='10.2.3.124/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.sg2['id'])
self.assertEqual(security_group_rule['ip_range']['cidr'],
"10.2.3.124/24")
def test_create_by_group_id(self):
rule = security_group_rule_template(group_id=self.sg1['id'],
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.sg2['id'])
def test_create_by_same_group_id(self):
rule1 = security_group_rule_template(group_id=self.sg1['id'],
from_port=80, to_port=80,
parent_group_id=self.sg2['id'])
self.parent_security_group['rules'] = [security_group_rule_db(rule1)]
rule2 = security_group_rule_template(group_id=self.sg1['id'],
from_port=81, to_port=81,
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule2})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.sg2['id'])
self.assertEqual(security_group_rule['from_port'], 81)
self.assertEqual(security_group_rule['to_port'], 81)
def test_create_none_value_from_to_port(self):
rule = {'parent_group_id': self.sg1['id'],
'group_id': self.sg1['id']}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertIsNone(security_group_rule['from_port'])
self.assertIsNone(security_group_rule['to_port'])
self.assertEqual(security_group_rule['group']['name'], 'test')
self.assertEqual(security_group_rule['parent_group_id'],
self.sg1['id'])
def test_create_none_value_from_to_port_icmp(self):
rule = {'parent_group_id': self.sg1['id'],
'group_id': self.sg1['id'],
'ip_protocol': 'ICMP'}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertEqual(security_group_rule['ip_protocol'], 'ICMP')
self.assertEqual(security_group_rule['from_port'], -1)
self.assertEqual(security_group_rule['to_port'], -1)
self.assertEqual(security_group_rule['group']['name'], 'test')
self.assertEqual(security_group_rule['parent_group_id'],
self.sg1['id'])
def test_create_none_value_from_to_port_tcp(self):
rule = {'parent_group_id': self.sg1['id'],
'group_id': self.sg1['id'],
'ip_protocol': 'TCP'}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertEqual(security_group_rule['ip_protocol'], 'TCP')
self.assertEqual(security_group_rule['from_port'], 1)
self.assertEqual(security_group_rule['to_port'], 65535)
self.assertEqual(security_group_rule['group']['name'], 'test')
self.assertEqual(security_group_rule['parent_group_id'],
self.sg1['id'])
def test_create_by_invalid_cidr_json(self):
rule = security_group_rule_template(
ip_protocol="tcp",
from_port=22,
to_port=22,
parent_group_id=self.sg2['id'],
cidr="10.2.3.124/2433")
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_by_invalid_tcp_port_json(self):
rule = security_group_rule_template(
ip_protocol="tcp",
from_port=75534,
to_port=22,
parent_group_id=self.sg2['id'],
cidr="10.2.3.124/24")
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_by_invalid_icmp_port_json(self):
rule = security_group_rule_template(
ip_protocol="icmp",
from_port=1,
to_port=256,
parent_group_id=self.sg2['id'],
cidr="10.2.3.124/24")
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_add_existing_rules_by_cidr(self):
rule = security_group_rule_template(cidr='10.0.0.0/24',
parent_group_id=self.sg2['id'])
self.parent_security_group['rules'] = [security_group_rule_db(rule)]
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_add_existing_rules_by_group_id(self):
rule = security_group_rule_template(group_id=1)
self.parent_security_group['rules'] = [security_group_rule_db(rule)]
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_no_body(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, None)
def test_create_with_no_security_group_rule_in_body(self):
rules = {'test': 'test'}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, rules)
def test_create_with_invalid_parent_group_id(self):
rule = security_group_rule_template(parent_group_id='invalid')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_non_existing_parent_group_id(self):
rule = security_group_rule_template(group_id=None,
parent_group_id=self.invalid_id)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_non_existing_group_id(self):
rule = security_group_rule_template(group_id='invalid',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_invalid_protocol(self):
rule = security_group_rule_template(ip_protocol='invalid-protocol',
cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_no_protocol(self):
rule = security_group_rule_template(cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
del rule['ip_protocol']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_invalid_from_port(self):
rule = security_group_rule_template(from_port='666666',
cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_invalid_to_port(self):
rule = security_group_rule_template(to_port='666666',
cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_non_numerical_from_port(self):
rule = security_group_rule_template(from_port='invalid',
cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_non_numerical_to_port(self):
rule = security_group_rule_template(to_port='invalid',
cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_no_from_port(self):
rule = security_group_rule_template(cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
del rule['from_port']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_no_to_port(self):
rule = security_group_rule_template(cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
del rule['to_port']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_invalid_cidr(self):
rule = security_group_rule_template(cidr='10.2.2222.0/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_no_cidr_group(self):
rule = security_group_rule_template(parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.parent_security_group['id'])
self.assertEqual(security_group_rule['ip_range']['cidr'],
"0.0.0.0/0")
def test_create_with_invalid_group_id(self):
rule = security_group_rule_template(group_id='invalid',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_empty_group_id(self):
rule = security_group_rule_template(group_id='',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_nonexist_group_id(self):
rule = security_group_rule_template(group_id=self.invalid_id,
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_same_group_parent_id_and_group_id(self):
rule = security_group_rule_template(group_id=self.sg1['id'],
parent_group_id=self.sg1['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.sg1['id'])
self.assertEqual(security_group_rule['group']['name'],
self.sg1['name'])
def _test_create_with_no_ports_and_no_group(self, proto):
rule = {'ip_protocol': proto, 'parent_group_id': self.sg2['id']}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def _test_create_with_no_ports(self, proto):
rule = {'ip_protocol': proto, 'parent_group_id': self.sg2['id'],
'group_id': self.sg1['id']}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
expected_rule = {
'from_port': 1, 'group': {'tenant_id': '123', 'name': 'test'},
'ip_protocol': proto, 'to_port': 65535, 'parent_group_id':
self.sg2['id'], 'ip_range': {}, 'id': security_group_rule['id']
}
if proto == 'icmp':
expected_rule['to_port'] = -1
expected_rule['from_port'] = -1
self.assertEqual(expected_rule, security_group_rule)
def test_create_with_no_ports_icmp(self):
self._test_create_with_no_ports_and_no_group('icmp')
self._test_create_with_no_ports('icmp')
def test_create_with_no_ports_tcp(self):
self._test_create_with_no_ports_and_no_group('tcp')
self._test_create_with_no_ports('tcp')
def test_create_with_no_ports_udp(self):
self._test_create_with_no_ports_and_no_group('udp')
self._test_create_with_no_ports('udp')
def _test_create_with_ports(self, proto, from_port, to_port):
rule = {
'ip_protocol': proto, 'from_port': from_port, 'to_port': to_port,
'parent_group_id': self.sg2['id'], 'group_id': self.sg1['id']
}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
expected_rule = {
'from_port': from_port,
'group': {'tenant_id': '123', 'name': 'test'},
'ip_protocol': proto, 'to_port': to_port, 'parent_group_id':
self.sg2['id'], 'ip_range': {}, 'id': security_group_rule['id']
}
self.assertEqual(proto, security_group_rule['ip_protocol'])
self.assertEqual(from_port, security_group_rule['from_port'])
self.assertEqual(to_port, security_group_rule['to_port'])
self.assertEqual(expected_rule, security_group_rule)
def test_create_with_ports_icmp(self):
self._test_create_with_ports('icmp', 0, 1)
self._test_create_with_ports('icmp', 0, 0)
self._test_create_with_ports('icmp', 1, 0)
def test_create_with_ports_tcp(self):
self._test_create_with_ports('tcp', 1, 1)
self._test_create_with_ports('tcp', 1, 65535)
self._test_create_with_ports('tcp', 65535, 65535)
def test_create_with_ports_udp(self):
self._test_create_with_ports('udp', 1, 1)
self._test_create_with_ports('udp', 1, 65535)
self._test_create_with_ports('udp', 65535, 65535)
def test_delete(self):
rule = security_group_rule_template(id=self.sg2['id'],
parent_group_id=self.sg2['id'])
def security_group_rule_get(context, id):
return security_group_rule_db(rule)
def security_group_rule_destroy(context, id):
pass
self.stubs.Set(nova.db, 'security_group_rule_get',
security_group_rule_get)
self.stubs.Set(nova.db, 'security_group_rule_destroy',
security_group_rule_destroy)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
% self.sg2['id'])
self.controller.delete(req, self.sg2['id'])
def test_delete_invalid_rule_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules' +
'/invalid')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, 'invalid')
def test_delete_non_existing_rule_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
% self.invalid_id)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, self.invalid_id)
def test_create_rule_quota_limit(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
for num in range(100, 100 + CONF.quota_security_group_rules):
rule = {
'ip_protocol': 'tcp', 'from_port': num,
'to_port': num, 'parent_group_id': self.sg2['id'],
'group_id': self.sg1['id']
}
self.controller.create(req, {'security_group_rule': rule})
rule = {
'ip_protocol': 'tcp', 'from_port': '121', 'to_port': '121',
'parent_group_id': self.sg2['id'], 'group_id': self.sg1['id']
}
self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
req, {'security_group_rule': rule})
def test_create_rule_cidr_allow_all(self):
rule = security_group_rule_template(cidr='0.0.0.0/0',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.parent_security_group['id'])
self.assertEqual(security_group_rule['ip_range']['cidr'],
"0.0.0.0/0")
def test_create_rule_cidr_ipv6_allow_all(self):
rule = security_group_rule_template(cidr='::/0',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.parent_security_group['id'])
self.assertEqual(security_group_rule['ip_range']['cidr'],
"::/0")
def test_create_rule_cidr_allow_some(self):
rule = security_group_rule_template(cidr='15.0.0.0/8',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.parent_security_group['id'])
self.assertEqual(security_group_rule['ip_range']['cidr'],
"15.0.0.0/8")
def test_create_rule_cidr_bad_netmask(self):
rule = security_group_rule_template(cidr='15.0.0.0/0')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
class TestSecurityGroupRulesV2(TestSecurityGroupRulesV21):
secgrp_ctl_cls = secgroups_v2.SecurityGroupRulesController
UUID1 = '00000000-0000-0000-0000-000000000001'
UUID2 = '00000000-0000-0000-0000-000000000002'
UUID3 = '00000000-0000-0000-0000-000000000003'
def fake_compute_get_all(*args, **kwargs):
base = {'id': 1, 'description': 'foo', 'user_id': 'bar',
'project_id': 'baz', 'deleted': False, 'deleted_at': None,
'updated_at': None, 'created_at': None}
db_list = [
fakes.stub_instance(
1, uuid=UUID1,
security_groups=[dict(base, **{'name': 'fake-0-0'}),
dict(base, **{'name': 'fake-0-1'})]),
fakes.stub_instance(
2, uuid=UUID2,
security_groups=[dict(base, **{'name': 'fake-1-0'}),
dict(base, **{'name': 'fake-1-1'})])
]
return instance_obj._make_instance_list(args[1],
objects.InstanceList(),
db_list,
['metadata', 'system_metadata',
'security_groups', 'info_cache'])
def fake_compute_get(*args, **kwargs):
inst = fakes.stub_instance(1, uuid=UUID3,
security_groups=[{'name': 'fake-2-0'},
{'name': 'fake-2-1'}])
return fake_instance.fake_instance_obj(args[1],
expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS, **inst)
def fake_compute_create(*args, **kwargs):
return ([fake_compute_get(*args, **kwargs)], '')
def fake_get_instances_security_groups_bindings(inst, context, servers):
groups = {UUID1: [{'name': 'fake-0-0'}, {'name': 'fake-0-1'}],
UUID2: [{'name': 'fake-1-0'}, {'name': 'fake-1-1'}],
UUID3: [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]}
result = {}
for server in servers:
result[server['id']] = groups.get(server['id'])
return result
class SecurityGroupsOutputTestV21(test.TestCase):
base_url = '/v2/fake/servers'
content_type = 'application/json'
def setUp(self):
super(SecurityGroupsOutputTestV21, self).setUp()
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
self.stubs.Set(compute.api.API, 'create', fake_compute_create)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Security_groups'])
self.app = self._setup_app()
def _setup_app(self):
return fakes.wsgi_app_v21(init_only=('os-security-groups', 'servers'))
def _make_request(self, url, body=None):
req = webob.Request.blank(url)
if body:
req.method = 'POST'
req.body = self._encode_body(body)
req.content_type = self.content_type
req.headers['Accept'] = self.content_type
res = req.get_response(self.app)
return res
def _encode_body(self, body):
return jsonutils.dumps(body)
def _get_server(self, body):
return jsonutils.loads(body).get('server')
def _get_servers(self, body):
return jsonutils.loads(body).get('servers')
def _get_groups(self, server):
return server.get('security_groups')
def test_create(self):
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
res = self._make_request(self.base_url, {'server': server})
self.assertEqual(res.status_int, 202)
server = self._get_server(res.body)
for i, group in enumerate(self._get_groups(server)):
name = 'fake-2-%s' % i
self.assertEqual(group.get('name'), name)
def test_show(self):
url = self.base_url + '/' + UUID3
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
server = self._get_server(res.body)
for i, group in enumerate(self._get_groups(server)):
name = 'fake-2-%s' % i
self.assertEqual(group.get('name'), name)
def test_detail(self):
url = self.base_url + '/detail'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
for i, server in enumerate(self._get_servers(res.body)):
for j, group in enumerate(self._get_groups(server)):
name = 'fake-%s-%s' % (i, j)
self.assertEqual(group.get('name'), name)
def test_no_instance_passthrough_404(self):
def fake_compute_get(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
url = self.base_url + '/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
res = self._make_request(url)
self.assertEqual(res.status_int, 404)
class SecurityGroupsOutputTestV2(SecurityGroupsOutputTestV21):
def _setup_app(self):
return fakes.wsgi_app(init_only=('servers',))
class SecurityGroupsOutputPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(SecurityGroupsOutputPolicyEnforcementV21, self).setUp()
self.controller = secgroups_v21.SecurityGroupsOutputController()
self.req = fakes.HTTPRequest.blank('')
self.rule_name = "os_compute_api:os-security-groups"
self.rule = {self.rule_name: "project:non_fake"}
self.policy.set_rules(self.rule)
def test_show_policy_failed(self):
self.controller.show(self.req, None, FAKE_UUID1)
def test_create_policy_failed(self):
self.controller.create(self.req, None, {})
def test_detail_policy_failed(self):
self.controller.detail(self.req, None)
class PolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(PolicyEnforcementV21, self).setUp()
self.req = fakes.HTTPRequest.blank('')
self.rule_name = "os_compute_api:os-security-groups"
self.rule = {self.rule_name: "project:non_fake"}
def _common_policy_check(self, func, *arg, **kwarg):
self.policy.set_rules(self.rule)
exc = self.assertRaises(
exception.PolicyNotAuthorized, func, *arg, **kwarg)
self.assertEqual(
"Policy doesn't allow %s to be performed." % self.rule_name,
exc.format_message())
class SecurityGroupPolicyEnforcementV21(PolicyEnforcementV21):
def setUp(self):
super(SecurityGroupPolicyEnforcementV21, self).setUp()
self.controller = secgroups_v21.SecurityGroupController()
def test_create_policy_failed(self):
self._common_policy_check(self.controller.create, self.req, {})
def test_show_policy_failed(self):
self._common_policy_check(self.controller.show, self.req, FAKE_UUID1)
def test_delete_policy_failed(self):
self._common_policy_check(self.controller.delete, self.req, FAKE_UUID1)
def test_index_policy_failed(self):
self._common_policy_check(self.controller.index, self.req)
def test_update_policy_failed(self):
self._common_policy_check(
self.controller.update, self.req, FAKE_UUID1, {})
class ServerSecurityGroupPolicyEnforcementV21(PolicyEnforcementV21):
def setUp(self):
super(ServerSecurityGroupPolicyEnforcementV21, self).setUp()
self.controller = secgroups_v21.ServerSecurityGroupController()
def test_index_policy_failed(self):
self._common_policy_check(self.controller.index, self.req, FAKE_UUID1)
class SecurityGroupRulesPolicyEnforcementV21(PolicyEnforcementV21):
def setUp(self):
super(SecurityGroupRulesPolicyEnforcementV21, self).setUp()
self.controller = secgroups_v21.SecurityGroupRulesController()
def test_create_policy_failed(self):
self._common_policy_check(self.controller.create, self.req, {})
def test_delete_policy_failed(self):
self._common_policy_check(self.controller.delete, self.req, FAKE_UUID1)
class SecurityGroupActionPolicyEnforcementV21(PolicyEnforcementV21):
def setUp(self):
super(SecurityGroupActionPolicyEnforcementV21, self).setUp()
self.controller = secgroups_v21.SecurityGroupActionController()
def test_add_security_group_policy_failed(self):
self._common_policy_check(
self.controller._addSecurityGroup, self.req, FAKE_UUID1, {})
def test_remove_security_group_policy_failed(self):
self._common_policy_check(
self.controller._removeSecurityGroup, self.req, FAKE_UUID1, {})
|
mabushadi/dpxdt
|
refs/heads/master
|
dependencies/appengine-gcs-client/python/demo/cloudstorage/test_utils.py
|
45
|
# Copyright 2013 Google Inc. All Rights Reserved.
"""Utils for testing."""
class MockUrlFetchResult(object):
def __init__(self, status, headers, body):
self.status_code = status
self.headers = headers
self.content = body
self.content_was_truncated = False
self.final_url = None
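# Example usage (illustrative sketch; the demo tests construct these results
# themselves with whatever status, headers, and body a given test needs):
#
#   result = MockUrlFetchResult(200, {'content-length': '4'}, 'body')
#   assert result.status_code == 200
#   assert result.headers['content-length'] == '4'
#   assert result.content == 'body'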
|
qrizan/moopy
|
refs/heads/master
|
moopy/moopy/wsgi.py
|
1
|
"""
WSGI config for moopy project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "moopy.settings")
application = get_wsgi_application()
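# For local testing this module can be served by any WSGI server; for example
# (assuming gunicorn is installed):
#
#   gunicorn moopy.wsgi:application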
|
ProjectCloudly/cloudly
|
refs/heads/master
|
userprofile/views.py
|
2
|
# -*- coding: utf-8 -*-
import os
import time
import logging
import unicodedata
import datetime
import string, pickle
from random import choice
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import redirect, render
from django.contrib.auth.models import User
from userprofile.models import Activity
from userprofile.models import Profile as userprofile
from django.contrib.auth import authenticate
from django.contrib.auth import login, logout
from django.contrib.auth.decorators import login_required
from django.middleware import csrf
import boto.ec2
import boto.ec2.cloudwatch
from amazon import s3_funcs
from amazon import s3_funcs_shortcuts
from vms.models import Cache
logger = logging.getLogger(__name__)
from django.conf import settings
from django.core.mail import send_mail
from twython import Twython
AWS_REGIONS = {
"ap-northeast-1":"Asia Pacific (Tokyo) Region",
"ap-southeast-1":"Asia Pacific (Singapore) Region",
"ap-southeast-2":"Asia Pacific (Sydney) Region",
"eu-west-1":"EU (Ireland) Region",
"eu-central-1":"EU (Frankfurt) Region",
"sa-east-1":"South America (Sao Paulo) Region",
"us-east-1":"US East (Northern Virginia) Region",
"us-west-1":"US West (Northern California) Region",
"us-west-2":"US West (Oregon) Region",
}
def _remove_accents(data):
return ''.join(x for x in unicodedata.normalize('NFKD', data) if x in string.ascii_letters).lower()
def _get_or_create_csrf_token(request):
token = request.META.get('CSRF_COOKIE', None)
if token is None:
token = csrf._get_new_csrf_key()
request.META['CSRF_COOKIE'] = token
request.META['CSRF_COOKIE_USED'] = True
token = "<input type='hidden' name='csrf_token' value='%s'" % (token)
return token
def _log_user_activity(userprofile, activity, link, function="", ip=""):
activity = Activity.objects.create(user=userprofile.user,activity=activity,link=link)
if(ip):
activity.ip_addr = ip
activity.save()
if(activity.activity=="click"):
userprofile.clicks += 1
if(function):
userprofile.function = function
userprofile.save()
return activity
def _simple_email_validation(email):
    if('@' in email and '.' in email):
return True
return False
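# e.g. _simple_email_validation('user@example.com') -> True
#      _simple_email_validation('not-an-email')     -> False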
def login_as_demo_user(request):
user = User.objects.get(username='demo@demo.com')
user.set_password('demo')
user.save()
login(request, authenticate(username='demo@demo.com', password='demo'))
return HttpResponseRedirect("/")
def user_logout(request):
print '-- logout'
try:
logout(request)
except: pass
print request.user
return HttpResponseRedirect("/goodbye/")
@login_required()
def reset_cloud_settings(request):
print '-- reset cloud settings'
user = request.user
profile = userprofile.objects.get(user=request.user)
print request.user
profile.aws_access_key = ""
profile.aws_secret_key = ""
profile.aws_ec2_verified = False
profile.save()
vms_cache = Cache.objects.get(user=user)
vms_cache.vms_response = ""
vms_cache.save()
ip = request.META['REMOTE_ADDR']
_log_user_activity(profile,"click","/cloud/settings/reset/","change_password",ip=ip)
return HttpResponseRedirect("/cloud/settings/")
def goodbye(request):
return render_to_response('goodbye.html', {},)
def register(request):
print '-- registration:'
err = None
if request.POST:
print request.POST
name = request.POST[u'username']
email = request.POST[u'email']
username = email
try:
if(request.POST['agree']!='on'):
err = "must_agree_tos"
except: err = "must_agree_tos"
password1 = request.POST[u'password1']
password2 = password1
print username
if not password1 or not password2:
err = "empty_password"
print err
if(password1 != password2):
err = "password_mismatch"
print err
if not _simple_email_validation(email):
err = "invalid_email_address"
print err
if not err:
passwd = password1
try:
User.objects.create_user(username, email, passwd, last_login=datetime.datetime.now())
except:
err = "duplicate_username"
print err
if not err:
user = authenticate(username=username, password=passwd)
if(user):
secret = (''.join([choice(string.digits) for i in range(3)]) + '-' + \
''.join([choice(string.letters + string.digits) for i in range(4)]) + '-' + \
''.join([choice(string.digits) for i in range(5)])).upper()
agent_hash = (''.join([choice(string.letters + string.digits) for i in range(12)]))
username = _remove_accents(username)
userprofile.objects.get_or_create(user=user,secret=secret,name=name,agent_hash=agent_hash,language="EN")
login(request, user)
request.session['language'] = "us"
print 'new user registered'
print username
return HttpResponseRedirect("/welcome/")
return render_to_response('register.html', {'err':err,}, context_instance=RequestContext(request) )
def auth(request):
print '-- auth:'
err = False
if(request.method == 'POST'):
post = request.POST
print post
try:
email = request.POST['username']
            password = request.POST['password']
except:
print 'failed login code:1'
err = True
#return HttpResponseRedirect("/register")
try:
user = User.objects.get(email=email)
except:
print 'failed login code:2'
err = True
#return HttpResponseRedirect("/register")
try:
            user = authenticate(username=user.username, password=password)
login(request, user)
except:
print 'failed login code:3'
err = True
#return HttpResponseRedirect("/register")
if(not err):
print 'user logged in', user
return HttpResponseRedirect("/")
return render_to_response('login.html',{'err':err},context_instance=RequestContext(request))
@login_required()
def cloud_settings(request):
print '-- cloud settings:'
user = request.user
profile = userprofile.objects.get(user=request.user)
secret = profile.secret
user.last_login = datetime.datetime.now()
user.save()
print request.user
ip = request.META['REMOTE_ADDR']
_log_user_activity(profile,"click","/cloud/settings/","cloud_settings",ip=ip)
profile_regions = profile.aws_enabled_regions.split(',')
aws_ec2_verified = profile.aws_ec2_verified
updated_regions = False
if request.GET:
updated = request.GET['updated']
if(updated=='regions'): updated_regions = True
return render_to_response('cloud_settings.html', {'aws_ec2_verified':aws_ec2_verified,'aws_regions':AWS_REGIONS,'profile_regions':profile_regions,'profile':profile,'secret':secret,'updated_regions':updated_regions,}, context_instance=RequestContext(request))
@login_required()
def cloud_settings_update_credentials(request):
user = request.user
profile = userprofile.objects.get(user=request.user)
secret = profile.secret
err = None
aws_access_key = request.POST['aws_access_key']
aws_secret_key = request.POST['aws_access_secret']
if(aws_secret_key):
profile.aws_secret_key = aws_secret_key
profile.save()
else: err = "Missing AWS Secret"
if(aws_access_key):
profile.aws_access_key = aws_access_key
profile.save()
else: err = "Missing AWS Access Key"
profile_regions = profile.aws_enabled_regions.split(',')
try:
ec2conn = boto.ec2.connect_to_region( "us-west-1",
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key)
regions_ = ec2conn.get_all_regions()
# this is to satisfy codacy...
regions_ = regions_
profile.aws_ec2_verified = True
except:
err = "AWS verification failed. Please check your Access Key and Secret and try again."
profile.aws_ec2_verified = False
profile.save()
return render_to_response('cloud_settings.html', {'err':err,'aws_ec2_verified':profile.aws_ec2_verified,'aws_regions':AWS_REGIONS,'profile_regions':profile_regions,'profile':profile,'secret':secret,}, context_instance=RequestContext(request))
@login_required()
def change_password(request):
print '-- change password:'
user = request.user
profile = userprofile.objects.get(user=request.user)
secret = profile.secret
print request.user
error = None
ip = request.META['REMOTE_ADDR']
_log_user_activity(profile,"click","/account/password/","change_password",ip=ip)
if(request.POST):
current_passwd = request.POST['current_passwd']
new_passwd = request.POST['new_passwd']
new_passwd_repeat = request.POST['new_passwd_repeat']
if(new_passwd != new_passwd_repeat):
error = "Passwords do not match."
user = authenticate(username=request.user, password=current_passwd)
if(not user):
error = "Wrong password."
if(not error):
user.set_password(new_passwd)
user.save()
return HttpResponseRedirect("/account/settings/")
return render_to_response('account_change_password.html', {'error':error,}, context_instance=RequestContext(request))
@login_required()
def cloud_settings_update_regions(request):
enable_regions = request.POST.getlist('checkboxes')
c=0
enabled_regions = ""
for region in enable_regions:
if(c):
enabled_regions += ","+str(region)
else:
enabled_regions = str(region)
c+=1
user = request.user
profile = userprofile.objects.get(user=request.user)
profile.aws_enabled_regions = enabled_regions
profile.save()
return HttpResponseRedirect("/cloud/settings?updated=regions")
@login_required()
def account_settings(request):
print '-- account settings:'
user = request.user
user.last_login = datetime.datetime.now()
user.save()
profile = userprofile.objects.get(user=request.user)
secret = profile.secret
print request.user
ip = request.META['REMOTE_ADDR']
_log_user_activity(profile,"click","/account/settings/","account_settings",ip=ip)
return render_to_response('account_settings.html', {'request':request, 'aws_regions':AWS_REGIONS,'user':user,'profile':profile,}, context_instance=RequestContext(request))
def begin_twitter_auth(request):
# Instantiate Twython with the first leg of our trip.
twitter = Twython(settings.TWITTER_KEY, settings.TWITTER_SECRET)
# Request an authorization url to send the user to...
callback_url = request.build_absolute_uri("/thanks/")
auth_props = twitter.get_authentication_tokens(callback_url)
# Then send them over there, durh.
request.session['request_token'] = auth_props
request.session['next_url'] = request.GET.get('next',None)
return HttpResponseRedirect(auth_props['auth_url'])
def thanks(request, redirect_url=settings.LOGIN_REDIRECT_URL):
# Now that we've got the magic tokens back from Twitter, we need to exchange
# for permanent ones and store them...
oauth_token = request.session['request_token']['oauth_token']
oauth_token_secret = request.session['request_token']['oauth_token_secret']
twitter = Twython(settings.TWITTER_KEY, settings.TWITTER_SECRET,
oauth_token, oauth_token_secret)
# Retrieve the tokens we want...
authorized_tokens = twitter.get_authorized_tokens(request.GET['oauth_verifier'])
# If they already exist, grab them, login and redirect to a page displaying stuff.
try:
user = User.objects.get(username=authorized_tokens['screen_name'])
except User.DoesNotExist:
# We mock a creation here; no email, password is just the token, etc.
secret = (''.join([choice(string.digits) for i in range(3)]) + '-' + \
''.join([choice(string.letters + string.digits) for i in range(4)]) + '-' + \
''.join([choice(string.digits) for i in range(5)])).upper()
agent_hash = (''.join([choice(string.letters + string.digits) for i in range(12)]))
user = User.objects.create_user(authorized_tokens['screen_name'], "n/a", authorized_tokens['oauth_token_secret'])
profile = userprofile()
profile.user = user
profile.name = authorized_tokens['screen_name']
profile.secret = secret
profile.agent_hash = agent_hash
profile.oauth_token = authorized_tokens['oauth_token']
profile.oauth_secret = authorized_tokens['oauth_token_secret']
profile.save()
user = authenticate(
username=authorized_tokens['screen_name'],
password=authorized_tokens['oauth_token_secret']
)
login(request, user)
redirect_url = request.session.get('next_url', redirect_url)
return HttpResponseRedirect("/")
def user_timeline(request):
user = request.user.profile
twitter = Twython(settings.TWITTER_KEY, settings.TWITTER_SECRET,
user.oauth_token, user.oauth_secret)
user_tweets = twitter.get_home_timeline()
return render_to_response('tweets.html', {'tweets': user_tweets})
|
PongPi/isl-odoo
|
refs/heads/8.0
|
addons/l10n_be_hr_payroll_account/__init__.py
|
430
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
sgraham/nope
|
refs/heads/master
|
tools/gyp/test/mac/gyptest-cflags.py
|
100
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that compile-time flags work.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
CHDIR = 'cflags'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', test.ALL, chdir=CHDIR)
test.pass_test()
|
jameshensman/pymc3
|
refs/heads/master
|
pymc3/examples/factor_potential.py
|
14
|
from pymc3 import *
with Model() as model:
x = Normal('x', 1, 1)
x2 = Potential('x2', -x ** 2)
start = model.test_point
h = find_hessian(start)
step = Metropolis(model.vars, h)
def run(n = 3000):
if n == "short":
n = 50
with model:
trace = sample(n, step, start)
if __name__ == '__main__':
run()
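# Note (added comment): run() draws 3000 Metropolis samples from the model
# above; run("short") draws only 50, which is handy for quick smoke tests.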
|
lucuma/solution
|
refs/heads/master
|
tests/test_collections.py
|
2
|
# coding=utf-8
from operator import eq
import solution as f
def lists_are_equal(l1, l2):
return all(map(eq, l1, l2))
def test_render_():
field = f.Collection()
field.name = 'abc'
field.load_data(u'a, b,c')
assert field() == field.as_input()
assert (field(foo='bar') ==
u'<input foo="bar" name="abc" type="text" value="a, b, c">')
assert (field.as_textarea(foo='bar') ==
u'<textarea foo="bar" name="abc">a, b, c</textarea>')
assert (field(foo='bar', type='email') ==
u'<input foo="bar" name="abc" type="email" value="a, b, c">')
field.load_data(obj_value=[u'd', u'e', u'f'])
assert field() == u'<input name="abc" type="text" value="d, e, f">'
field.sep = '|'
field.load_data(obj_value=[u'a', u'b', u'c'])
assert field() == u'<input name="abc" type="text" value="a|b|c">'
field = f.Collection(validate=[f.Required])
field.name = u'abc'
field.load_data(u'a,b')
assert (field() ==
u'<input name="abc" type="text" value="a, b" required>')
assert (field(required=False) ==
u'<input name="abc" type="text" value="a, b">')
field = f.Collection()
field.name = u'abc'
field.load_data([])
assert field() == u'<input name="abc" type="text" value="">'
field.load_data([], [])
assert field() == u'<input name="abc" type="text" value="">'
def test_validate_collection():
field = f.Collection()
field.name = 'abc'
field.load_data(u'a, b,c ')
assert lists_are_equal(field.validate(), [u'a', u'b', u'c'])
field.load_data([u'a, b'])
assert lists_are_equal(field.validate(), [u'a', u'b'])
    assert field.validate() == [u'a', u'b']
field = f.Collection(sep='|')
field.load_data(u'a, b,c ')
assert lists_are_equal(field.validate(), [u'a, b,c'])
def test_filter_collection():
def filter_the_b(py_value):
return py_value != u'b'
field = f.Collection(filters=[filter_the_b])
field.name = 'abc'
field.load_data(u'a, b,c')
assert lists_are_equal(field.validate(), [u'a', u'c'])
field = f.Collection(filters=[f.ValidEmail])
field.name = 'abc'
field.load_data([u'a@example.com,b@example.com'])
assert field.validate() == [u'a@example.com', u'b@example.com']
def test_collection_as_dict():
field = f.Collection()
field.name = 'abc'
expdict = {
'name': u'abc',
'value': [],
'error': '',
}
result = sorted(list(field.as_dict().items()))
expected = sorted(list(expdict.items()))
assert result == expected
field.load_data(u'a, b,c')
expdict['value'] = u'a,b,c'.split(',')
result = sorted(list(field.as_dict().items()))
expected = sorted(list(expdict.items()))
print(result)
print(expected)
assert result == expected
|
crakensio/django_training
|
refs/heads/master
|
lib/python2.7/site-packages/pip/commands/help.py
|
401
|
from pip.basecommand import Command, SUCCESS
from pip.exceptions import CommandError
class HelpCommand(Command):
"""Show help for commands"""
name = 'help'
usage = """
%prog <command>"""
summary = 'Show help for commands.'
def run(self, options, args):
from pip.commands import commands, get_similar_commands
try:
# 'pip help' with no args is handled by pip.__init__.parseopt()
cmd_name = args[0] # the command we need help for
except IndexError:
return SUCCESS
if cmd_name not in commands:
guess = get_similar_commands(cmd_name)
msg = ['unknown command "%s"' % cmd_name]
if guess:
msg.append('maybe you meant "%s"' % guess)
raise CommandError(' - '.join(msg))
command = commands[cmd_name]()
command.parser.print_help()
return SUCCESS
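# Usage sketch (added comment): running "pip help install" resolves the
# 'install' command and prints its parser help; an unknown command name raises
# CommandError, optionally suggesting a similar command.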
|
trondkr/romstools
|
refs/heads/master
|
CreateObsFileIS4DVAR/writeObsfile.py
|
1
|
from netCDF4 import Dataset
from netCDF4 import num2date
import numpy as np
import time
import os
import datetime
__author__ = 'Trond Kristiansen'
__email__ = 'trond.kristiansen@imr.no'
__created__ = datetime.datetime(2012, 12, 30)
__modified__ = datetime.datetime(2013, 2, 12)
__version__ = "1.0"
__status__ = "Development, 12.2.2013"
"""When calling this function some of the variables are optional and will not be written until lastIteration=True"""
def writeData(outputFile,obs_lat,obs_lon,obs_value,Nobs,survey_time,obs_time,obs_Xgrid,obs_Ygrid,
firstIteration,lastIteration,
obs_flag,obs_type,obs_error,obs_Zgrid,obs_depth,obs_variance,
survey,is3d,Nstate,USENETCDF4):
if USENETCDF4 is True:
myZLIB=True
myformat='NETCDF4'
else:
myZLIB=False
myformat='NETCDF3_CLASSIC'
if firstIteration is True:
f1 = Dataset(outputFile, mode='w', format=myformat)
        f1.description="This is an obs file for SST"
f1.history = 'Created ' + time.ctime(time.time())
f1.source = 'Trond Kristiansen (trond.kristiansen@imr.no)'
f1.type='NetCDF4 using program createMapNS.py'
f1.options='Program requires: getCortad.py and writeObsfile.py'
""" Define dimensions """
f1.createDimension('one', 1)
f1.createDimension('state_variable', Nstate)
f1.createDimension('datum', None)
v_spherical = f1.createVariable('spherical', 'c', ('one',),zlib=myZLIB)
v_spherical.long_name = 'grid type logical switch'
v_spherical.option_T = "spherical"
v_spherical.option_F = "Cartesian"
v_spherical[:] = "T"
v_obs_type = f1.createVariable('obs_type', 'i', ('datum',),zlib=myZLIB)
v_obs_type.long_name = 'model state variable associated with observation'
v_obs_type.opt_1 ='free-surface'
v_obs_type.opt_2 ='vertically integrated u-momentum component';
v_obs_type.opt_3 ='vertically integrated v-momentum component';
v_obs_type.opt_4 ='u-momentum component'
v_obs_type.opt_5 ='v-momentum component'
v_obs_type.opt_6 ='potential temperature'
v_obs_type.opt_7 ='salinity'
v_obs_type[:] = obs_type
v_time = f1.createVariable('obs_time', 'd', ('datum',),zlib=myZLIB)
v_time.long_name = 'Time of observation'
v_time.units = 'days'
v_time.field = 'time, scalar, series'
v_time.calendar = 'standard'
v_time[:] = obs_time
v_obs_lon = f1.createVariable('obs_lon', 'd', ('datum',),zlib=myZLIB)
v_obs_lon.long_name = 'Longitude of observation'
v_obs_lon.units = 'degrees_east'
v_obs_lon.min = -180
v_obs_lon.max = 180
v_obs_lon[:] = obs_lon
v_obs_lat = f1.createVariable('obs_lat', 'd', ('datum',),zlib=myZLIB)
v_obs_lat.long_name = 'Latitude of observation'
v_obs_lat.units = 'degrees_north'
v_obs_lat.min = -90
v_obs_lat.max = 90
v_obs_lat[:] = obs_lat
v_obs_depth = f1.createVariable('obs_depth', 'd', ('datum',),zlib=myZLIB)
v_obs_depth.long_name = 'Depth of observation'
v_obs_depth.units = 'meter'
v_obs_depth.minus = 'downwards'
v_obs_depth[:] = obs_depth
v_obs_error = f1.createVariable('obs_error', 'd', ('datum',),zlib=myZLIB)
v_obs_error.long_name = 'Observation error covariance'
v_obs_error.units = 'squared state variable units'
v_obs_error[:] = obs_error
v_obs_val = f1.createVariable('obs_value', 'd', ('datum',),zlib=myZLIB)
v_obs_val.long_name = 'Observation value'
v_obs_val.units = 'state variable units'
v_obs_val[:] = obs_value
v_obs_xgrid = f1.createVariable('obs_Xgrid', 'd', ('datum',),zlib=myZLIB)
v_obs_xgrid.long_name = 'x-grid observation location'
v_obs_xgrid.units = 'nondimensional'
v_obs_xgrid.left = "INT(obs_Xgrid(datum))"
v_obs_xgrid.right = "INT(obs_Xgrid(datum))+1"
v_obs_xgrid[:] = obs_Xgrid
v_obs_ygrid = f1.createVariable('obs_Ygrid', 'd', ('datum',),zlib=myZLIB)
v_obs_ygrid.long_name = 'y-grid observation location'
v_obs_ygrid.units = 'nondimensional'
v_obs_ygrid.top = "INT(obs_Ygrid(datum))+1"
v_obs_ygrid.bottom = "INT(obs_Ygrid(datum))"
v_obs_ygrid[:] = obs_Ygrid
v_obs_zgrid = f1.createVariable('obs_Zgrid', 'd', ('datum',),zlib=myZLIB)
v_obs_zgrid.long_name = 'z-grid observation location'
v_obs_zgrid.units = 'nondimensional'
v_obs_zgrid.up = "INT(obs_Zgrid(datum))+1"
v_obs_zgrid.down = "INT(obs_Zgrid(datum))"
v_obs_zgrid[:] = obs_Zgrid
f1.close()
if firstIteration is False and lastIteration is False:
f1 = Dataset(outputFile, mode='a', format=myformat)
t0 = time.time()
"""Find index for ading new info to arrays (same for all variables)"""
myshape=f1.variables["obs_Zgrid"][:].shape
indexStart=myshape[0]
indexEnd=obs_Zgrid.shape[0]+myshape[0]
f1.variables["obs_type"][indexStart:indexEnd] = obs_type
f1.variables["obs_time"][indexStart:indexEnd] = obs_time
f1.variables["obs_lon"][indexStart:indexEnd] = obs_lon
f1.variables["obs_lat"][indexStart:indexEnd] = obs_lat
f1.variables["obs_depth"][indexStart:indexEnd] = obs_depth
f1.variables["obs_error"][indexStart:indexEnd] = obs_error
f1.variables["obs_value"][indexStart:indexEnd] = obs_value
f1.variables["obs_Xgrid"][indexStart:indexEnd] = obs_Xgrid
f1.variables["obs_Ygrid"][indexStart:indexEnd] = obs_Ygrid
f1.variables["obs_Zgrid"][indexStart:indexEnd] = obs_Zgrid
t1 = time.time()
print "array append created in %s seconds"%(t1-t0)
f1.close()
if lastIteration is True:
f1 = Dataset(outputFile, mode='a', format=myformat)
f1.createDimension('survey', survey)
v_obs = f1.createVariable('Nobs', 'i', ('survey',),zlib=myZLIB)
v_obs.long_name = 'number of observations with the same survey time'
v_obs.field = 'scalar, series'
v_obs[:] = Nobs
v_time = f1.createVariable('survey_time', 'i', ('survey',),zlib=myZLIB)
v_time.long_name = 'Survey time'
v_time.units = 'day'
v_time.field = 'time, scalar, series'
v_time.calendar = 'standard'
v_time[:] = survey_time
v_obs_var = f1.createVariable('obs_variance', 'd', ('state_variable',),zlib=myZLIB)
v_obs_var.long_name = 'global time and space observation variance'
v_obs_var.units = 'squared state variable units'
v_obs_var[:] = obs_variance
f1.close()
|
elianerpereira/gtg
|
refs/heads/master
|
GTG/backends/backend_mantis.py
|
2
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2013 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
import os
import uuid
from GTG import _
from GTG.backends.genericbackend import GenericBackend
from GTG.backends.backendsignals import BackendSignals
from GTG.backends.periodicimportbackend import PeriodicImportBackend
from GTG.backends.syncengine import SyncEngine, SyncMeme
from GTG.tools.logger import Log
from GTG.core.task import Task
from suds.client import Client
'''
Backend for importing mantis issues in GTG
Dependencies:
* python-suds
'''
class Backend(PeriodicImportBackend):
_general_description = {
GenericBackend.BACKEND_NAME: "backend_mantis",
GenericBackend.BACKEND_HUMAN_NAME: _("MantisBT"),
GenericBackend.BACKEND_AUTHORS: ["Luca Invernizzi", "Alayn Gortazar"],
GenericBackend.BACKEND_TYPE: GenericBackend.TYPE_READONLY,
GenericBackend.BACKEND_DESCRIPTION:
_("This synchronization service lets you import the issues found"
" on Mantis using a prestablished filter called 'gtg'."
" As the issue state changes in Mantis, the GTG task is "
" updated.\n"
"Please note that this is a read only synchronization service,"
" which means that if you open one of the imported tasks and "
" change one of the:\n"
" - title\n"
" - description\n"
" - tags\n"
"Your changes <b>will</b> be reverted when the associated"
" issue is modified. Apart from those, you are free to set "
" any other field (start/due dates, subtasks...): your "
" changes will be preserved. This is useful to add "
" personal annotations to issue"),
}
_static_parameters = {
"period": {
GenericBackend.PARAM_TYPE: GenericBackend.TYPE_INT,
GenericBackend.PARAM_DEFAULT_VALUE: 5, },
"username": {
GenericBackend.PARAM_TYPE: GenericBackend.TYPE_STRING,
GenericBackend.PARAM_DEFAULT_VALUE: 'insert your username', },
"password": {
GenericBackend.PARAM_TYPE: GenericBackend.TYPE_PASSWORD,
GenericBackend.PARAM_DEFAULT_VALUE: '', },
"service-url": {
GenericBackend.PARAM_TYPE: GenericBackend.TYPE_STRING,
GenericBackend.PARAM_DEFAULT_VALUE: 'http://example.com/mantis',
},
"tag-with-project-name": {
GenericBackend.PARAM_TYPE: GenericBackend.TYPE_BOOL,
GenericBackend.PARAM_DEFAULT_VALUE: True},
}
def __init__(self, parameters):
'''
See GenericBackend for an explanation of this function.
Re-loads the saved state of the synchronization
'''
super(Backend, self).__init__(parameters)
# loading the saved state of the synchronization, if any
self.data_path = os.path.join('backends/mantis/',
"sync_engine-" + self.get_id())
self.sync_engine = self._load_pickled_file(self.data_path,
SyncEngine())
def save_state(self):
'''Saves the state of the synchronization'''
self._store_pickled_file(self.data_path, self.sync_engine)
def do_periodic_import(self):
# Establishing connection
try:
self.cancellation_point()
client = Client('%s/api/soap/mantisconnect.php?wsdl' %
(self._parameters['service-url']))
except KeyError:
self.quit(disable=True)
BackendSignals().backend_failed(self.get_id(),
BackendSignals.ERRNO_AUTHENTICATION
)
return
projects = client.service.mc_projects_get_user_accessible(
self._parameters['username'],
self._parameters['password'])
filters = client.service.mc_filter_get(self._parameters['username'],
self._parameters['password'], 0)
# Fetching the issues
self.cancellation_point()
my_issues = []
for filt in filters:
if filt['name'] == 'gtg':
for project in projects:
my_issues = client.service.mc_filter_get_issues(
self._parameters['username'],
self._parameters['password'],
project['id'],
filt['id'], 0, 100)
for issue in my_issues:
self.cancellation_point()
self._process_mantis_issue(issue)
last_issue_list = self.sync_engine.get_all_remote()
new_issue_list = [str(issue['id']) for issue in my_issues]
for issue_link in set(last_issue_list).difference(set(new_issue_list)):
self.cancellation_point()
# we make sure that the other backends are not modifying the task
# set
with self.datastore.get_backend_mutex():
tid = self.sync_engine.get_local_id(issue_link)
self.datastore.request_task_deletion(tid)
try:
self.sync_engine.break_relationship(remote_id=issue_link)
except KeyError:
pass
return
###############################################################################
### Process tasks #############################################################
###############################################################################
def _process_mantis_issue(self, issue):
'''
        Given an issue object, finds out if it must be synced to a GTG note and,
if so, it carries out the synchronization (by creating or
updating a GTG task, or deleting itself if the related task has
been deleted)
        @param issue: a mantis issue
'''
has_task = self.datastore.has_task
action, tid = self.sync_engine.analyze_remote_id(str(issue['id']),
has_task,
lambda b: True)
Log.debug("processing mantis (%s)" % (action))
if action is None:
return
issue_dic = self._prefetch_issue_data(issue)
# for the rest of the function, no access to issue must be made, so
# that the time of blocking inside the with statements is short.
# To be sure of that, set issue to None
issue = None
with self.datastore.get_backend_mutex():
if action == SyncEngine.ADD:
tid = str(uuid.uuid4())
task = self.datastore.task_factory(tid)
self._populate_task(task, issue_dic)
self.sync_engine.record_relationship(local_id=tid,
remote_id=str(
issue_dic['number']),
meme=SyncMeme(
task.get_modified(),
issue_dic['modified'],
self.get_id()))
self.datastore.push_task(task)
elif action == SyncEngine.UPDATE:
task = self.datastore.get_task(tid)
self._populate_task(task, issue_dic)
meme = self.sync_engine.get_meme_from_remote_id(
issue_dic['number'])
meme.set_local_last_modified(task.get_modified())
meme.set_remote_last_modified(issue_dic['modified'])
self.save_state()
def _prefetch_issue_data(self, mantis_issue):
'''
        We fetch all the info we need from the mantis_issue to
populate a task beforehand (these will be used in _populate_task).
@param mantis_issue: a mantis issue
@returns dict: a dictionary containing the relevant issue attributes
'''
issue_dic = {'title': mantis_issue['summary'],
'text': mantis_issue['description'],
'reporter': mantis_issue['reporter'].name,
'modified': mantis_issue['last_updated'],
'project': mantis_issue['project'].name,
'status': mantis_issue['status'].name,
'completed': (mantis_issue['status'].id >= 80),
'number': str(mantis_issue['id'])}
try:
issue_dic['assigned'] = mantis_issue['handler'].name == \
self._parameters['username']
except AttributeError:
issue_dic['assigned'] = False
return issue_dic
def _populate_task(self, task, issue_dic):
'''
Fills a GTG task with the data from a mantis issue.
@param task: a Task
@param issue_dic: a mantis issue
'''
# set task status
if issue_dic["completed"]:
task.set_status(Task.STA_DONE)
else:
task.set_status(Task.STA_ACTIVE)
if task.get_title() != issue_dic['title']:
task.set_title(_("Iss.") + " %s: " % issue_dic["number"]
+ issue_dic['title'])
text = self._build_issue_text(issue_dic)
if task.get_excerpt() != text:
task.set_text(text)
new_tags = set([])
if self._parameters["tag-with-project-name"]:
new_tags = set(['@' + issue_dic['project']])
current_tags = set(task.get_tags_name())
# add the new ones
for tag in new_tags.difference(current_tags):
task.add_tag(tag)
task.add_remote_id(self.get_id(), issue_dic['number'])
def _build_issue_text(self, issue_dic):
'''
        Creates the text that describes an issue
'''
text = _("Reported by: ") + issue_dic["reporter"] + '\n'
text += _("Link to issue: ") + \
self._parameters['service-url'] + '/view.php?id=%s' % \
(issue_dic["number"]) + '\n'
text += '\n' + issue_dic["text"]
return text
|
was4444/chromium.src
|
refs/heads/nw15
|
native_client_sdk/src/build_tools/build_updater.py
|
51
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Build script to generate a new sdk_tools bundle.
This script packages the files necessary to generate the SDK updater -- the
tool users run to download new bundles, update existing bundles, etc.
"""
import argparse
import buildbot_common
import build_version
import glob
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
SDK_SRC_DIR = os.path.dirname(SCRIPT_DIR)
SDK_DIR = os.path.dirname(SDK_SRC_DIR)
SRC_DIR = os.path.dirname(SDK_DIR)
NACL_DIR = os.path.join(SRC_DIR, 'native_client')
CYGTAR = os.path.join(NACL_DIR, 'build', 'cygtar.py')
sys.path.append(os.path.join(SDK_SRC_DIR, 'tools'))
import oshelpers
UPDATER_FILES = [
# launch scripts
('build_tools/naclsdk', 'nacl_sdk/naclsdk'),
('build_tools/naclsdk.bat', 'nacl_sdk/naclsdk.bat'),
# base manifest
('build_tools/json/naclsdk_manifest0.json',
'nacl_sdk/sdk_cache/naclsdk_manifest2.json'),
# SDK tools
('build_tools/sdk_tools/cacerts.txt', 'nacl_sdk/sdk_tools/cacerts.txt'),
('build_tools/sdk_tools/*.py', 'nacl_sdk/sdk_tools/'),
('build_tools/sdk_tools/command/*.py', 'nacl_sdk/sdk_tools/command/'),
('build_tools/sdk_tools/third_party/*.py', 'nacl_sdk/sdk_tools/third_party/'),
('build_tools/sdk_tools/third_party/fancy_urllib/*.py',
'nacl_sdk/sdk_tools/third_party/fancy_urllib/'),
('build_tools/sdk_tools/third_party/fancy_urllib/README.chromium',
'nacl_sdk/sdk_tools/third_party/fancy_urllib/README.chromium'),
('build_tools/manifest_util.py', 'nacl_sdk/sdk_tools/manifest_util.py'),
('LICENSE', 'nacl_sdk/sdk_tools/LICENSE'),
(CYGTAR, 'nacl_sdk/sdk_tools/cygtar.py'),
]
def MakeUpdaterFilesAbsolute(out_dir):
"""Return the result of changing all relative paths in UPDATER_FILES to
absolute paths.
Args:
out_dir: The output directory.
Returns:
A list of 2-tuples. The first element in each tuple is the source path and
the second is the destination path.
"""
assert os.path.isabs(out_dir)
result = []
for in_file, out_file in UPDATER_FILES:
if not os.path.isabs(in_file):
in_file = os.path.join(SDK_SRC_DIR, in_file)
out_file = os.path.join(out_dir, out_file)
result.append((in_file, out_file))
return result
def GlobFiles(files):
"""Expand wildcards for 2-tuples of sources/destinations.
This function also will convert destinations from directories into filenames.
For example:
('foo/*.py', 'bar/') => [('foo/a.py', 'bar/a.py'), ('foo/b.py', 'bar/b.py')]
Args:
files: A list of 2-tuples of (source, dest) paths.
Returns:
A new list of 2-tuples, after the sources have been wildcard-expanded, and
the destinations have been changed from directories to filenames.
"""
result = []
for in_file_glob, out_file in files:
if out_file.endswith('/'):
for in_file in glob.glob(in_file_glob):
result.append((in_file,
os.path.join(out_file, os.path.basename(in_file))))
else:
result.append((in_file_glob, out_file))
return result
def CopyFiles(files):
"""Given a list of 2-tuples (source, dest), copy each source file to a dest
file.
Args:
files: A list of 2-tuples."""
for in_file, out_file in files:
buildbot_common.MakeDir(os.path.dirname(out_file))
buildbot_common.CopyFile(in_file, out_file)
def UpdateRevisionNumber(out_dir, revision_number):
"""Update the sdk_tools bundle to have the given revision number.
This function finds all occurrences of the string "{REVISION}" in
sdk_update_main.py and replaces them with |revision_number|. The only
observable effect of this change should be that running:
naclsdk -v
will contain the new |revision_number|.
Args:
out_dir: The output directory containing the scripts to update.
revision_number: The revision number as an integer, or None to use the
current Chrome revision (as retrieved through svn/git).
"""
if revision_number is None:
revision_number = build_version.ChromeRevision()
SDK_UPDATE_MAIN = os.path.join(out_dir,
'nacl_sdk/sdk_tools/sdk_update_main.py')
contents = open(SDK_UPDATE_MAIN, 'r').read().replace(
'{REVISION}', str(revision_number))
open(SDK_UPDATE_MAIN, 'w').write(contents)
def BuildUpdater(out_dir, revision_number=None):
"""Build naclsdk.zip and sdk_tools.tgz in |out_dir|.
Args:
out_dir: The output directory.
revision_number: The revision number of this updater, as an integer. Or
None, to use the current Chrome revision."""
out_dir = os.path.abspath(out_dir)
# Build SDK directory
buildbot_common.RemoveDir(os.path.join(out_dir, 'nacl_sdk'))
updater_files = MakeUpdaterFilesAbsolute(out_dir)
updater_files = GlobFiles(updater_files)
CopyFiles(updater_files)
UpdateRevisionNumber(out_dir, revision_number)
out_files = [os.path.relpath(out_file, out_dir)
for _, out_file in updater_files]
# Make zip
buildbot_common.RemoveFile(os.path.join(out_dir, 'nacl_sdk.zip'))
buildbot_common.Run([sys.executable, oshelpers.__file__, 'zip',
'nacl_sdk.zip'] + out_files,
cwd=out_dir)
# Tar of all files under nacl_sdk/sdk_tools
sdktoolsdir = os.path.join('nacl_sdk', 'sdk_tools')
tarname = os.path.join(out_dir, 'sdk_tools.tgz')
files_to_tar = [os.path.relpath(out_file, sdktoolsdir)
for out_file in out_files if out_file.startswith(sdktoolsdir)]
buildbot_common.RemoveFile(tarname)
buildbot_common.Run([sys.executable, CYGTAR, '-C',
os.path.join(out_dir, sdktoolsdir), '-czf', tarname] + files_to_tar)
sys.stdout.write('\n')
def main(args):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-o', '--out', help='output directory',
dest='out_dir', default=os.path.join(SRC_DIR, 'out'))
parser.add_argument('-r', '--revision', dest='revision', default=None,
help='revision number of this updater')
parser.add_argument('-v', '--verbose', help='verbose output')
options = parser.parse_args(args)
buildbot_common.verbose = options.verbose
if options.revision:
options.revision = int(options.revision)
BuildUpdater(options.out_dir, options.revision)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
Fireblend/chromium-crosswalk
|
refs/heads/master
|
build/android/pylib/device/device_list.py
|
114
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module to keep track of devices across builds."""
import os
LAST_DEVICES_FILENAME = '.last_devices'
LAST_MISSING_DEVICES_FILENAME = '.last_missing'
def GetPersistentDeviceList(file_name):
"""Returns a list of devices.
Args:
file_name: the file name containing a list of devices.
Returns: List of device serial numbers that were on the bot.
"""
with open(file_name) as f:
return f.read().splitlines()
def WritePersistentDeviceList(file_name, device_list):
path = os.path.dirname(file_name)
if not os.path.exists(path):
os.makedirs(path)
with open(file_name, 'w') as f:
f.write('\n'.join(set(device_list)))
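# Usage sketch (added comment); the path and serial below are hypothetical:
#   WritePersistentDeviceList('/tmp/.last_devices', ['0123456789abcdef'])
#   assert '0123456789abcdef' in GetPersistentDeviceList('/tmp/.last_devices')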
|
fasrc/lilpsp
|
refs/heads/master
|
lilpsp/config.py
|
1
|
"""
basic configuration
DESCRIPTION
    Parameters controlling how the site functions.
This file is intended to be modified. See comments below for details on
    each option. See the README for a higher-level overview.
REQUIREMENTS
n/a
AUTHOR
Copyright (c) 2011-2013
Harvard FAS Research Computing
John Brunelle <john_brunelle@harvard.edu>
    All rights reserved.
"""
import os, re
#LOG_FILE -- the absolute path of the log file to use
#The apache user (or whatever user under which the web server is running) must
#be able to write to it, or create if it does not exist (see also LOG_FILE_MODE
#below). This default is to take the name of the directory containing all the
#psp, html, and the lilpsp python package, and use that as the base name of the
#log file. Note that any failures writing to this log are ignored.
LOG_FILE = '/var/tmp/%s.log' % os.path.basename(os.path.normpath(os.path.join(os.path.dirname(__file__),'..')))
#LOG_FILE_MODE -- the permissions of the log file, if this creates it
#This has no effect if the file already exists.
LOG_FILE_MODE = 0600
#DEBUG -- boolean for whether or not to include full details in Exceptions and log messages
#WARNING: True may cause tracebacks, shell command output, and other secrets to
#be included in the Exceptions that are raised. Only use True in production if
#your log is secure and you're confident all calling code catches Exceptions.
DEBUG = True
#AUTH_TYPE -- what type of authentication to use
#choose one of:
# 'NONE' -- don't require anything
# 'HTTP' -- leave it to apache (i.e. rely on req.user)
# 'FORM' -- present a form to the user (login.psp) and authenticate creds using org.authenticateUser()
#If you choose 'FORM', you must implement org.authenticateUser(). Each psp
#page must call sessionCheck() in order for this to be respected. See the
#README for full details.
AUTH_TYPE = 'NONE'
#RE_VALID_EMAIL_ADDRESS -- filter for allowable email addresses
#This is only applicable if you add code that calls core.sendEmail(). This
#expression is lax by default, allowing just plain usernames (so that the
#system emails the account); tighten if desired. All email addresses are
#properly quoted/escaped when passed to other programs, regardless of the
#expression here.
RE_VALID_EMAIL_ADDRESS = re.compile('^[a-zA-Z0-9_\-.+%@]+$')
|
bwh0/stuff
|
refs/heads/master
|
website/Farooqui Conjecture.py
|
1
|
# Farooqui Conjecture.py
# Imaad Farooqui
# Farooqui Conjecture LCM
# Modules
import math
from time import sleep
# LCM Function
def LCM(num1, num2):
stop = 0 # Set Variables
# Find biggest value
if(num1>num2):
maximum = math.ceil(0.5 * num1)
bigger = 1
else:
maximum = math.ceil(0.5 * num2)
bigger = 2
# Check if x goes into y
if(bigger == 1):
if(num1 % num2 == 0):
x = num1
print(x)
stop = 1
else:
if(num2 % num1 == 0):
x = num2
print(x)
stop = 1
while(stop == 0): #Forever Loop
if(num1 % maximum == 0 and num2 % maximum == 0): # Check for common divisor
a = maximum
break
maximum = maximum - 1 # Decrease value of maximum
if(maximum == 1):
a = 1
break
# Output if not already calculated
if(stop == 0): # If not already calculated
if(a == 1):
print(num1 * num2) # x * y
else:
print((num1 / a) * (num2 / a) * (a)) # x * y * a
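# Worked example (added comment, not in the original script): LCM(4, 6) finds
# the common divisor a = 2 and prints (4 / 2) * (6 / 2) * 2 = 12.0.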
while(1 == 1): #Forever Loop
# Variable Setup
choicea = 0
choiceb = 0
# User Input
## Choice 1
while(choicea == 0 or choicea < 1 or choicea % 1 != 0):
        choicea = float(input("Choose your first integer.\n"))
## Choice 2
while(choiceb == 0 or choiceb < 1 or choiceb % 1 != 0):
        choiceb = float(input("Choose your second integer.\n"))
# Perform LCM Function
LCM(choicea, choiceb)
|
pothosware/gr-runtime
|
refs/heads/master
|
gnuradio-runtime/python/gnuradio/gr/pubsub.py
|
78
|
#!/usr/bin/env python
#
# Copyright 2008,2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Abstract GNU Radio publisher/subscriber interface
This is a proof of concept implementation, will likely change significantly.
"""
class pubsub(dict):
def __init__(self):
self._publishers = { }
self._subscribers = { }
self._proxies = { }
def __missing__(self, key, value=None):
dict.__setitem__(self, key, value)
self._publishers[key] = None
self._subscribers[key] = []
self._proxies[key] = None
def __setitem__(self, key, val):
if not self.has_key(key):
self.__missing__(key, val)
elif self._proxies[key] is not None:
(p, newkey) = self._proxies[key]
p[newkey] = val
else:
dict.__setitem__(self, key, val)
for sub in self._subscribers[key]:
# Note this means subscribers will get called in the thread
# context of the 'set' caller.
sub(val)
def __getitem__(self, key):
if not self.has_key(key): self.__missing__(key)
if self._proxies[key] is not None:
(p, newkey) = self._proxies[key]
return p[newkey]
elif self._publishers[key] is not None:
return self._publishers[key]()
else:
return dict.__getitem__(self, key)
def publish(self, key, publisher):
if not self.has_key(key): self.__missing__(key)
if self._proxies[key] is not None:
(p, newkey) = self._proxies[key]
p.publish(newkey, publisher)
else:
self._publishers[key] = publisher
def subscribe(self, key, subscriber):
if not self.has_key(key): self.__missing__(key)
if self._proxies[key] is not None:
(p, newkey) = self._proxies[key]
p.subscribe(newkey, subscriber)
else:
self._subscribers[key].append(subscriber)
def unpublish(self, key):
if self._proxies[key] is not None:
(p, newkey) = self._proxies[key]
p.unpublish(newkey)
else:
self._publishers[key] = None
def unsubscribe(self, key, subscriber):
if self._proxies[key] is not None:
(p, newkey) = self._proxies[key]
p.unsubscribe(newkey, subscriber)
else:
self._subscribers[key].remove(subscriber)
def proxy(self, key, p, newkey=None):
if not self.has_key(key): self.__missing__(key)
if newkey is None: newkey = key
self._proxies[key] = (p, newkey)
def unproxy(self, key):
self._proxies[key] = None
# Test code
if __name__ == "__main__":
import sys
o = pubsub()
# Non-existent key gets auto-created with None value
print "Auto-created key 'foo' value:", o['foo']
# Add some subscribers
# First is a bare function
def print_len(x):
print "len=%i" % (len(x), )
o.subscribe('foo', print_len)
# The second is a class member function
class subber(object):
def __init__(self, param):
self._param = param
def printer(self, x):
print self._param, `x`
s = subber('param')
o.subscribe('foo', s.printer)
# The third is a lambda function
o.subscribe('foo', lambda x: sys.stdout.write('val='+`x`+'\n'))
# Update key 'foo', will notify subscribers
print "Updating 'foo' with three subscribers:"
o['foo'] = 'bar';
# Remove first subscriber
o.unsubscribe('foo', print_len)
# Update now will only trigger second and third subscriber
print "Updating 'foo' after removing a subscriber:"
o['foo'] = 'bar2';
# Publish a key as a function, in this case, a lambda function
o.publish('baz', lambda : 42)
print "Published value of 'baz':", o['baz']
# Unpublish the key
o.unpublish('baz')
# This will return None, as there is no publisher
print "Value of 'baz' with no publisher:", o['baz']
# Set 'baz' key, it gets cached
o['baz'] = 'bazzz'
# Now will return cached value, since no provider
print "Cached value of 'baz' after being set:", o['baz']
|
hack4impact/asylum-connect-catalog
|
refs/heads/master
|
app/bulk_resource/forms.py
|
2
|
from flask.ext.wtf import Form
from wtforms.validators import InputRequired
from wtforms.fields import (FieldList, FormField, RadioField, SelectField,
SelectMultipleField, SubmitField)
class NavigationForm(Form):
submit_next = SubmitField('Next')
submit_cancel = SubmitField('Cancel')
submit_back = SubmitField('Back')
class DetermineRequiredOptionDescriptorForm(Form):
required_option_descriptor = SelectField(
'Required Option Descriptor', validators=[InputRequired()])
navigation = FormField(NavigationForm)
class DetermineDescriptorTypesForm(Form):
descriptor_types = FieldList(
RadioField(
choices=[('text', 'Text'), ('option', 'Option')],
validators=[InputRequired()]))
navigation = FormField(NavigationForm)
class RequiredOptionDescriptorMissingForm(Form):
resources = FieldList(SelectMultipleField(validators=[InputRequired()]))
navigation = FormField(NavigationForm)
class DetermineOptionsForm(Form):
navigation = FormField(NavigationForm)
class SaveCsvDataForm(Form):
submit = SubmitField('Save')
submit_cancel = SubmitField('Cancel')
submit_back = SubmitField('Back')
|
mglukhikh/intellij-community
|
refs/heads/master
|
python/testData/stubs/FullyQualifiedTypingNamedTupleKwargsWithAs.py
|
27
|
import typing as T
nt = T.NamedTuple("name", field=str)
|
abstract-open-solutions/OCB
|
refs/heads/8.0
|
addons/auth_signup/res_users.py
|
90
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from datetime import datetime, timedelta
import random
from urlparse import urljoin
import werkzeug
from openerp.addons.base.ir.ir_mail_server import MailDeliveryException
from openerp.osv import osv, fields
from openerp.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT, ustr
from ast import literal_eval
from openerp.tools.translate import _
class SignupError(Exception):
pass
def random_token():
# the token has an entropy of about 120 bits (6 bits/char * 20 chars)
chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
return ''.join(random.choice(chars) for i in xrange(20))
def now(**kwargs):
dt = datetime.now() + timedelta(**kwargs)
return dt.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
class res_partner(osv.Model):
_inherit = 'res.partner'
def _get_signup_valid(self, cr, uid, ids, name, arg, context=None):
dt = now()
res = {}
for partner in self.browse(cr, uid, ids, context):
res[partner.id] = bool(partner.signup_token) and \
(not partner.signup_expiration or dt <= partner.signup_expiration)
return res
def _get_signup_url_for_action(self, cr, uid, ids, action=None, view_type=None, menu_id=None, res_id=None, model=None, context=None):
""" generate a signup url for the given partner ids and action, possibly overriding
the url state components (menu_id, id, view_type) """
if context is None:
context= {}
res = dict.fromkeys(ids, False)
base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
for partner in self.browse(cr, uid, ids, context):
# when required, make sure the partner has a valid signup token
if context.get('signup_valid') and not partner.user_ids:
self.signup_prepare(cr, uid, [partner.id], context=context)
route = 'login'
# the parameters to encode for the query
query = dict(db=cr.dbname)
signup_type = context.get('signup_force_type_in_url', partner.signup_type or '')
if signup_type:
route = 'reset_password' if signup_type == 'reset' else signup_type
if partner.signup_token and signup_type:
query['token'] = partner.signup_token
elif partner.user_ids:
query['login'] = partner.user_ids[0].login
else:
continue # no signup token, no user, thus no signup url!
fragment = dict()
if action:
fragment['action'] = action
if view_type:
fragment['view_type'] = view_type
if menu_id:
fragment['menu_id'] = menu_id
if model:
fragment['model'] = model
if res_id:
fragment['id'] = res_id
if fragment:
query['redirect'] = '/web#' + werkzeug.url_encode(fragment)
res[partner.id] = urljoin(base_url, "/web/%s?%s" % (route, werkzeug.url_encode(query)))
return res
def _get_signup_url(self, cr, uid, ids, name, arg, context=None):
""" proxy for function field towards actual implementation """
return self._get_signup_url_for_action(cr, uid, ids, context=context)
_columns = {
'signup_token': fields.char('Signup Token', copy=False),
'signup_type': fields.char('Signup Token Type', copy=False),
'signup_expiration': fields.datetime('Signup Expiration', copy=False),
'signup_valid': fields.function(_get_signup_valid, type='boolean', string='Signup Token is Valid'),
'signup_url': fields.function(_get_signup_url, type='char', string='Signup URL'),
}
def action_signup_prepare(self, cr, uid, ids, context=None):
return self.signup_prepare(cr, uid, ids, context=context)
def signup_cancel(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'signup_token': False, 'signup_type': False, 'signup_expiration': False}, context=context)
def signup_prepare(self, cr, uid, ids, signup_type="signup", expiration=False, context=None):
""" generate a new token for the partners with the given validity, if necessary
:param expiration: the expiration datetime of the token (string, optional)
"""
for partner in self.browse(cr, uid, ids, context):
if expiration or not partner.signup_valid:
token = random_token()
while self._signup_retrieve_partner(cr, uid, token, context=context):
token = random_token()
partner.write({'signup_token': token, 'signup_type': signup_type, 'signup_expiration': expiration})
return True
def _signup_retrieve_partner(self, cr, uid, token,
check_validity=False, raise_exception=False, context=None):
""" find the partner corresponding to a token, and possibly check its validity
:param token: the token to resolve
:param check_validity: if True, also check validity
:param raise_exception: if True, raise exception instead of returning False
:return: partner (browse record) or False (if raise_exception is False)
"""
partner_ids = self.search(cr, uid, [('signup_token', '=', token)], context=context)
if not partner_ids:
if raise_exception:
raise SignupError("Signup token '%s' is not valid" % token)
return False
partner = self.browse(cr, uid, partner_ids[0], context)
if check_validity and not partner.signup_valid:
if raise_exception:
raise SignupError("Signup token '%s' is no longer valid" % token)
return False
return partner
def signup_retrieve_info(self, cr, uid, token, context=None):
""" retrieve the user info about the token
:return: a dictionary with the user information:
- 'db': the name of the database
- 'token': the token, if token is valid
- 'name': the name of the partner, if token is valid
- 'login': the user login, if the user already exists
- 'email': the partner email, if the user does not exist
"""
partner = self._signup_retrieve_partner(cr, uid, token, raise_exception=True, context=None)
res = {'db': cr.dbname}
if partner.signup_valid:
res['token'] = token
res['name'] = partner.name
if partner.user_ids:
res['login'] = partner.user_ids[0].login
else:
res['email'] = partner.email or ''
return res
class res_users(osv.Model):
_inherit = 'res.users'
def _get_state(self, cr, uid, ids, name, arg, context=None):
res = {}
for user in self.browse(cr, uid, ids, context):
res[user.id] = ('active' if user.login_date else 'new')
return res
_columns = {
'state': fields.function(_get_state, string='Status', type='selection',
selection=[('new', 'Never Connected'), ('active', 'Activated')]),
}
def signup(self, cr, uid, values, token=None, context=None):
""" signup a user, to either:
- create a new user (no token), or
- create a user for a partner (with token, but no user for partner), or
- change the password of a user (with token, and existing user).
:param values: a dictionary with field values that are written on user
:param token: signup token (optional)
:return: (dbname, login, password) for the signed up user
"""
if token:
# signup with a token: find the corresponding partner id
res_partner = self.pool.get('res.partner')
partner = res_partner._signup_retrieve_partner(
cr, uid, token, check_validity=True, raise_exception=True, context=None)
# invalidate signup token
partner.write({'signup_token': False, 'signup_type': False, 'signup_expiration': False})
partner_user = partner.user_ids and partner.user_ids[0] or False
# avoid overwriting existing (presumably correct) values with geolocation data
if partner.country_id or partner.zip or partner.city:
values.pop('city', None)
values.pop('country_id', None)
if partner.lang:
values.pop('lang', None)
if partner_user:
# user exists, modify it according to values
values.pop('login', None)
values.pop('name', None)
partner_user.write(values)
return (cr.dbname, partner_user.login, values.get('password'))
else:
# user does not exist: sign up invited user
values.update({
'name': partner.name,
'partner_id': partner.id,
'email': values.get('email') or values.get('login'),
})
if partner.company_id:
values['company_id'] = partner.company_id.id
values['company_ids'] = [(6, 0, [partner.company_id.id])]
self._signup_create_user(cr, uid, values, context=context)
else:
# no token, sign up an external user
values['email'] = values.get('email') or values.get('login')
self._signup_create_user(cr, uid, values, context=context)
return (cr.dbname, values.get('login'), values.get('password'))
def _signup_create_user(self, cr, uid, values, context=None):
""" create a new user from the template user """
ir_config_parameter = self.pool.get('ir.config_parameter')
template_user_id = literal_eval(ir_config_parameter.get_param(cr, uid, 'auth_signup.template_user_id', 'False'))
assert template_user_id and self.exists(cr, uid, template_user_id, context=context), 'Signup: invalid template user'
# check that uninvited users may sign up
if 'partner_id' not in values:
if not literal_eval(ir_config_parameter.get_param(cr, uid, 'auth_signup.allow_uninvited', 'False')):
raise SignupError('Signup is not allowed for uninvited users')
assert values.get('login'), "Signup: no login given for new user"
assert values.get('partner_id') or values.get('name'), "Signup: no name or partner given for new user"
# create a copy of the template user (attached to a specific partner_id if given)
values['active'] = True
context = dict(context or {}, no_reset_password=True)
try:
with cr.savepoint():
return self.copy(cr, uid, template_user_id, values, context=context)
except Exception, e:
            # copy may fail if the requested login is not available.
raise SignupError(ustr(e))
def reset_password(self, cr, uid, login, context=None):
""" retrieve the user corresponding to login (login or email),
and reset their password
"""
user_ids = self.search(cr, uid, [('login', '=', login)], context=context)
if not user_ids:
user_ids = self.search(cr, uid, [('email', '=', login)], context=context)
if len(user_ids) != 1:
raise Exception('Reset password: invalid username or email')
return self.action_reset_password(cr, uid, user_ids, context=context)
def action_reset_password(self, cr, uid, ids, context=None):
""" create signup token for each user, and send their signup url by email """
# prepare reset password signup
res_partner = self.pool.get('res.partner')
partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context)]
res_partner.signup_prepare(cr, uid, partner_ids, signup_type="reset", expiration=now(days=+1), context=context)
if not context:
context = {}
# send email to users with their signup url
template = False
if context.get('create_user'):
try:
# get_object() raises ValueError if record does not exist
template = self.pool.get('ir.model.data').get_object(cr, uid, 'auth_signup', 'set_password_email')
except ValueError:
pass
if not bool(template):
template = self.pool.get('ir.model.data').get_object(cr, uid, 'auth_signup', 'reset_password_email')
assert template._name == 'email.template'
for user in self.browse(cr, uid, ids, context):
if not user.email:
raise osv.except_osv(_("Cannot send email: user has no email address."), user.name)
self.pool.get('email.template').send_mail(cr, uid, template.id, user.id, force_send=True, raise_exception=True, context=context)
def create(self, cr, uid, values, context=None):
if context is None:
context = {}
# overridden to automatically invite user to sign up
user_id = super(res_users, self).create(cr, uid, values, context=context)
user = self.browse(cr, uid, user_id, context=context)
if user.email and not context.get('no_reset_password'):
context = dict(context, create_user=True)
try:
self.action_reset_password(cr, uid, [user.id], context=context)
except MailDeliveryException:
self.pool.get('res.partner').signup_cancel(cr, uid, [user.partner_id.id], context=context)
return user_id
|
RPi-Distro/python-energenie
|
refs/heads/master
|
examples/simple/on_off.py
|
3
|
from energenie import switch_on, switch_off
from time import sleep
while True:
print("switching on...")
switch_on()
sleep(2)
print("switching off...")
switch_off()
sleep(2)
|
aral/isvat
|
refs/heads/master
|
django/contrib/auth/forms.py
|
19
|
from __future__ import unicode_literals
from django import forms
from django.forms.util import flatatt
from django.template import loader
from django.utils.datastructures import SortedDict
from django.utils.html import format_html, format_html_join
from django.utils.http import urlsafe_base64_encode
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext, ugettext_lazy as _
from django.contrib.auth import authenticate, get_user_model
from django.contrib.auth.models import User
from django.contrib.auth.hashers import (
MAXIMUM_PASSWORD_LENGTH, UNUSABLE_PASSWORD, identify_hasher,
)
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import get_current_site
UNMASKED_DIGITS_TO_SHOW = 6
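# Mask everything past the first UNMASKED_DIGITS_TO_SHOW characters of a value
# with asterisks.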
mask_password = lambda p: "%s%s" % (p[:UNMASKED_DIGITS_TO_SHOW], "*" * max(len(p) - UNMASKED_DIGITS_TO_SHOW, 0))
class ReadOnlyPasswordHashWidget(forms.Widget):
def render(self, name, value, attrs):
encoded = value
final_attrs = self.build_attrs(attrs)
if not encoded or encoded == UNUSABLE_PASSWORD:
summary = mark_safe("<strong>%s</strong>" % ugettext("No password set."))
else:
try:
hasher = identify_hasher(encoded)
except ValueError:
summary = mark_safe("<strong>%s</strong>" % ugettext(
"Invalid password format or unknown hashing algorithm."))
else:
summary = format_html_join('',
"<strong>{0}</strong>: {1} ",
((ugettext(key), value)
for key, value in hasher.safe_summary(encoded).items())
)
return format_html("<div{0}>{1}</div>", flatatt(final_attrs), summary)
class ReadOnlyPasswordHashField(forms.Field):
widget = ReadOnlyPasswordHashWidget
def __init__(self, *args, **kwargs):
kwargs.setdefault("required", False)
super(ReadOnlyPasswordHashField, self).__init__(*args, **kwargs)
def bound_data(self, data, initial):
# Always return initial because the widget doesn't
# render an input field.
return initial
class UserCreationForm(forms.ModelForm):
"""
A form that creates a user, with no privileges, from the given username and
password.
"""
error_messages = {
'duplicate_username': _("A user with that username already exists."),
'password_mismatch': _("The two password fields didn't match."),
}
username = forms.RegexField(label=_("Username"), max_length=30,
regex=r'^[\w.@+-]+$',
help_text=_("Required. 30 characters or fewer. Letters, digits and "
"@/./+/-/_ only."),
error_messages={
'invalid': _("This value may contain only letters, numbers and "
"@/./+/-/_ characters.")})
password1 = forms.CharField(label=_("Password"),
widget=forms.PasswordInput, max_length=MAXIMUM_PASSWORD_LENGTH)
password2 = forms.CharField(label=_("Password confirmation"),
widget=forms.PasswordInput,
max_length=MAXIMUM_PASSWORD_LENGTH,
help_text=_("Enter the same password as above, for verification."))
class Meta:
model = User
fields = ("username",)
def clean_username(self):
# Since User.username is unique, this check is redundant,
# but it sets a nicer error message than the ORM. See #13147.
username = self.cleaned_data["username"]
try:
User._default_manager.get(username=username)
except User.DoesNotExist:
return username
raise forms.ValidationError(self.error_messages['duplicate_username'])
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'])
return password2
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class UserChangeForm(forms.ModelForm):
username = forms.RegexField(
label=_("Username"), max_length=30, regex=r"^[\w.@+-]+$",
help_text=_("Required. 30 characters or fewer. Letters, digits and "
"@/./+/-/_ only."),
error_messages={
'invalid': _("This value may contain only letters, numbers and "
"@/./+/-/_ characters.")})
password = ReadOnlyPasswordHashField(label=_("Password"),
help_text=_("Raw passwords are not stored, so there is no way to see "
"this user's password, but you can change the password "
"using <a href=\"password/\">this form</a>."))
class Meta:
model = User
def __init__(self, *args, **kwargs):
super(UserChangeForm, self).__init__(*args, **kwargs)
f = self.fields.get('user_permissions', None)
if f is not None:
f.queryset = f.queryset.select_related('content_type')
def clean_password(self):
# Regardless of what the user provides, return the initial value.
# This is done here, rather than on the field, because the
# field does not have access to the initial value
return self.initial["password"]
class AuthenticationForm(forms.Form):
"""
Base class for authenticating users. Extend this to get a form that accepts
username/password logins.
"""
username = forms.CharField(max_length=254)
password = forms.CharField(
label=_("Password"),
widget=forms.PasswordInput,
max_length=MAXIMUM_PASSWORD_LENGTH,
)
error_messages = {
'invalid_login': _("Please enter a correct %(username)s and password. "
"Note that both fields may be case-sensitive."),
'no_cookies': _("Your Web browser doesn't appear to have cookies "
"enabled. Cookies are required for logging in."),
'inactive': _("This account is inactive."),
}
def __init__(self, request=None, *args, **kwargs):
"""
If request is passed in, the form will validate that cookies are
enabled. Note that the request (a HttpRequest object) must have set a
cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
running this validation.
"""
self.request = request
self.user_cache = None
super(AuthenticationForm, self).__init__(*args, **kwargs)
# Set the label for the "username" field.
UserModel = get_user_model()
self.username_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD)
if not self.fields['username'].label:
self.fields['username'].label = capfirst(self.username_field.verbose_name)
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username and password:
self.user_cache = authenticate(username=username,
password=password)
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'] % {
'username': self.username_field.verbose_name
})
elif not self.user_cache.is_active:
raise forms.ValidationError(self.error_messages['inactive'])
self.check_for_test_cookie()
return self.cleaned_data
def check_for_test_cookie(self):
if self.request and not self.request.session.test_cookie_worked():
raise forms.ValidationError(self.error_messages['no_cookies'])
def get_user_id(self):
if self.user_cache:
return self.user_cache.id
return None
def get_user(self):
return self.user_cache
class PasswordResetForm(forms.Form):
error_messages = {
'unknown': _("That email address doesn't have an associated "
"user account. Are you sure you've registered?"),
'unusable': _("The user account associated with this email "
"address cannot reset the password."),
}
email = forms.EmailField(label=_("Email"), max_length=254)
def clean_email(self):
"""
Validates that an active user exists with the given email address.
"""
UserModel = get_user_model()
email = self.cleaned_data["email"]
self.users_cache = UserModel._default_manager.filter(email__iexact=email)
if not len(self.users_cache):
raise forms.ValidationError(self.error_messages['unknown'])
if not any(user.is_active for user in self.users_cache):
# none of the filtered users are active
raise forms.ValidationError(self.error_messages['unknown'])
if any((user.password == UNUSABLE_PASSWORD)
for user in self.users_cache):
raise forms.ValidationError(self.error_messages['unusable'])
return email
def save(self, domain_override=None,
subject_template_name='registration/password_reset_subject.txt',
email_template_name='registration/password_reset_email.html',
use_https=False, token_generator=default_token_generator,
from_email=None, request=None):
"""
        Generates a one-use-only link for resetting the password and sends it
        to the user.
"""
from django.core.mail import send_mail
for user in self.users_cache:
if not domain_override:
current_site = get_current_site(request)
site_name = current_site.name
domain = current_site.domain
else:
site_name = domain = domain_override
c = {
'email': user.email,
'domain': domain,
'site_name': site_name,
'uid': urlsafe_base64_encode(str(user.id)),
'user': user,
'token': token_generator.make_token(user),
'protocol': use_https and 'https' or 'http',
}
subject = loader.render_to_string(subject_template_name, c)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
email = loader.render_to_string(email_template_name, c)
send_mail(subject, email, from_email, [user.email])
class SetPasswordForm(forms.Form):
"""
    A form that lets a user set their password without entering the
    old password.
"""
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
new_password1 = forms.CharField(
label=_("New password"),
widget=forms.PasswordInput,
max_length=MAXIMUM_PASSWORD_LENGTH,
)
new_password2 = forms.CharField(
label=_("New password confirmation"),
widget=forms.PasswordInput,
max_length=MAXIMUM_PASSWORD_LENGTH,
)
def __init__(self, user, *args, **kwargs):
self.user = user
super(SetPasswordForm, self).__init__(*args, **kwargs)
def clean_new_password2(self):
password1 = self.cleaned_data.get('new_password1')
password2 = self.cleaned_data.get('new_password2')
if password1 and password2:
if password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'])
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['new_password1'])
if commit:
self.user.save()
return self.user
class PasswordChangeForm(SetPasswordForm):
"""
    A form that lets a user change their password by entering
    their old password.
"""
error_messages = dict(SetPasswordForm.error_messages, **{
'password_incorrect': _("Your old password was entered incorrectly. "
"Please enter it again."),
})
old_password = forms.CharField(
label=_("Old password"),
widget=forms.PasswordInput,
max_length=MAXIMUM_PASSWORD_LENGTH,
)
def clean_old_password(self):
"""
Validates that the old_password field is correct.
"""
old_password = self.cleaned_data["old_password"]
if not self.user.check_password(old_password):
raise forms.ValidationError(
self.error_messages['password_incorrect'])
return old_password
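# Reorder the base fields so that 'old_password' is rendered before the new
# password fields.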
PasswordChangeForm.base_fields = SortedDict([
(k, PasswordChangeForm.base_fields[k])
for k in ['old_password', 'new_password1', 'new_password2']
])
class AdminPasswordChangeForm(forms.Form):
"""
A form used to change the password of a user in the admin interface.
"""
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
password1 = forms.CharField(
label=_("Password"),
widget=forms.PasswordInput,
max_length=MAXIMUM_PASSWORD_LENGTH,
)
password2 = forms.CharField(
label=_("Password (again)"),
widget=forms.PasswordInput,
max_length=MAXIMUM_PASSWORD_LENGTH,
)
def __init__(self, user, *args, **kwargs):
self.user = user
super(AdminPasswordChangeForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 and password2:
if password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'])
return password2
def save(self, commit=True):
"""
Saves the new password.
"""
self.user.set_password(self.cleaned_data["password1"])
if commit:
self.user.save()
return self.user
|
atopuzov/nitro-python
|
refs/heads/master
|
nssrc/com/citrix/netscaler/nitro/resource/stat/pq/pq_stats.py
|
3
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class pq_stats(base_resource) :
def __init__(self) :
self._clearstats = ""
self._pqtotalpolicymatches = 0
self._pqpolicymatchesrate = 0
self._pqtotalthresholdfailed = 0
self._pqthresholdfailedrate = 0
self._pqpriority1requests = 0
self._pqpriority1requestsrate = 0
self._pqpriority2requests = 0
self._pqpriority2requestsrate = 0
self._pqpriority3requests = 0
self._pqpriority3requestsrate = 0
@property
def clearstats(self) :
ur"""Clear the statsistics / counters.<br/>Possible values = basic, full.
"""
try :
return self._clearstats
except Exception as e:
raise e
@clearstats.setter
def clearstats(self, clearstats) :
ur"""Clear the statsistics / counters
"""
try :
self._clearstats = clearstats
except Exception as e:
raise e
@property
def pqpriority2requestsrate(self) :
ur"""Rate (/s) counter for pqpriority2requests.
"""
try :
return self._pqpriority2requestsrate
except Exception as e:
raise e
@property
def pqpolicymatchesrate(self) :
ur"""Rate (/s) counter for pqtotalpolicymatches.
"""
try :
return self._pqpolicymatchesrate
except Exception as e:
raise e
@property
def pqpriority1requestsrate(self) :
ur"""Rate (/s) counter for pqpriority1requests.
"""
try :
return self._pqpriority1requestsrate
except Exception as e:
raise e
@property
def pqthresholdfailedrate(self) :
ur"""Rate (/s) counter for pqtotalthresholdfailed.
"""
try :
return self._pqthresholdfailedrate
except Exception as e:
raise e
@property
def pqtotalpolicymatches(self) :
ur"""Number of times the Netscaler appliance matched an incoming request using any priority queuing policy.
"""
try :
return self._pqtotalpolicymatches
except Exception as e:
raise e
@property
def pqpriority1requests(self) :
ur"""Number of priority 1 requests that the Netscaler appliance received.
"""
try :
return self._pqpriority1requests
except Exception as e:
raise e
@property
def pqpriority3requestsrate(self) :
ur"""Rate (/s) counter for pqpriority3requests.
"""
try :
return self._pqpriority3requestsrate
except Exception as e:
raise e
@property
def pqpriority3requests(self) :
ur"""Number of priority 3 requests that the Netscaler appliance received.
"""
try :
return self._pqpriority3requests
except Exception as e:
raise e
@property
def pqpriority2requests(self) :
ur"""Number of priority 2 requests that the Netscaler appliance received.
"""
try :
return self._pqpriority2requests
except Exception as e:
raise e
@property
def pqtotalthresholdfailed(self) :
ur"""Number of times the Netscaler appliance failed to match an incoming request to any of priority queing policy.
"""
try :
return self._pqtotalthresholdfailed
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(pq_response, response, self.__class__.__name__.replace('_stats',''))
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.pq
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
return 0
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
ur""" Use this API to fetch the statistics of all pq_stats resources that are configured on netscaler.
"""
try :
obj = pq_stats()
if not name :
response = obj.stat_resources(service, option_)
return response
except Exception as e:
raise e
class Clearstats:
basic = "basic"
full = "full"
class pq_response(base_response) :
def __init__(self, length=1) :
self.pq = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.pq = [pq_stats() for _ in range(length)]
|
raju249/oppia
|
refs/heads/develop
|
scripts/build.py
|
5
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build file for production version of Oppia. Minifies JS and CSS."""
import os
import re
import shutil
import subprocess
import sys
import yaml
HEAD_DIR = os.path.join('core', 'templates', 'dev', 'head', '')
OUT_DIR = os.path.join('core', 'templates', 'prod', 'head', '')
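# re.sub bound to a pattern matching runs of two or more whitespace characters;
# used to collapse extra whitespace in HTML templates.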
REMOVE_WS = re.compile(r'\s{2,}').sub
YUICOMPRESSOR_DIR = os.path.join(
'..', 'oppia_tools', 'yuicompressor-2.4.8', 'yuicompressor-2.4.8.jar')
FILE_EXTENSIONS_TO_IGNORE = ['.py']
def _minify(source_path, target_path):
"""Runs the given file through a minifier and outputs it to target_path."""
cmd = 'java -jar %s %s -o %s' % (
YUICOMPRESSOR_DIR, source_path, target_path)
subprocess.check_call(cmd, shell=True)
def ensure_directory_exists(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
def process_html(filename, target):
f = open(filename, 'r')
content = f.read()
content = REMOVE_WS(' ', content)
ensure_directory_exists(target)
d = open(target, 'w+')
d.write(content)
def process_css(source_path, target_path):
ensure_directory_exists(target_path)
_minify(source_path, target_path)
def process_js(source_path, target_path):
ensure_directory_exists(target_path)
_minify(source_path, target_path)
def build_minified_third_party_libs(output_directory):
parent_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
node_path = os.path.join(
parent_dir, 'oppia_tools', 'node-4.2.1', 'bin', 'node')
gulp_path = os.path.join(
parent_dir, 'node_modules', 'gulp', 'bin', 'gulp.js')
gulp_build_cmd = [node_path, gulp_path, 'build', '--minify=True',
'--output_directory=%s' % output_directory]
proc = subprocess.Popen(
gulp_build_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
gulp_stdout, gulp_stderr = proc.communicate()
if gulp_stdout:
print gulp_stdout
if gulp_stderr:
        print 'Gulp build process failed. Exiting.'
print '----------------------------------------'
print gulp_stderr
sys.exit(1)
def copy_files_source_to_target(source, target):
"""Copies all files in source directory to target."""
print 'Processing %s' % os.path.join(os.getcwd(), source)
print 'Copying into %s' % os.path.join(os.getcwd(), target)
ensure_directory_exists(target)
shutil.rmtree(target)
for root, dirs, files in os.walk(os.path.join(os.getcwd(), source)):
for directory in dirs:
print 'Processing %s' % os.path.join(root, directory)
for filename in files:
source_path = os.path.join(root, filename)
if target in source_path:
continue
if source not in source_path:
continue
target_path = source_path.replace(
source, target)
ensure_directory_exists(target_path)
shutil.copyfile(source_path, target_path)
def build_files(source, target):
"""Minifies all css and js files, and removes whitespace from html in source
directory and copies it to target.
Arguments:
source, target: strings
"""
print 'Processing %s' % os.path.join(os.getcwd(), source)
print 'Generating into %s' % os.path.join(os.getcwd(), target)
ensure_directory_exists(target)
shutil.rmtree(target)
for root, dirs, files in os.walk(os.path.join(os.getcwd(), source)):
for directory in dirs:
print 'Processing %s' % os.path.join(root, directory)
for filename in files:
source_path = os.path.join(root, filename)
if target in source_path:
continue
if source not in source_path:
continue
target_path = source_path.replace(source, target)
# Ignore files with certain extensions
if any(source_path.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
continue
if filename.endswith('.html'):
process_html(source_path, target_path)
elif filename.endswith('.css'):
process_css(source_path, target_path)
elif filename.endswith('.js'):
process_js(source_path, target_path)
else:
ensure_directory_exists(target_path)
shutil.copyfile(source_path, target_path)
def get_cache_slug():
"""Returns the cache slug read from file."""
with open('cache_slug.yaml', 'r') as cache_slug_file:
content = cache_slug_file.read()
retrieved_dict = yaml.safe_load(content)
assert isinstance(retrieved_dict, dict)
return retrieved_dict['cache_slug']
if __name__ == '__main__':
CACHE_SLUG = get_cache_slug()
BUILD_DIR = os.path.join('build', CACHE_SLUG)
    # os.path.dirname(path) (in ensure_directory_exists()) returns the parent
    # directory of the path passed to it. This is as intended for file paths,
    # but for directory paths we do not want this to happen, hence we append a
    # trailing slash to such paths.
# Process assets, copy it to build/[cache_slug]/assets
ASSETS_SRC_DIR = os.path.join('assets', '')
ASSETS_OUT_DIR = os.path.join(BUILD_DIR, 'assets', '')
copy_files_source_to_target(ASSETS_SRC_DIR, ASSETS_OUT_DIR)
# Process third_party resources, copy it to
# build/[cache_slug]/third_party/generated
THIRD_PARTY_GENERATED_OUT_DIR = os.path.join(
BUILD_DIR, 'third_party', 'generated')
build_minified_third_party_libs(THIRD_PARTY_GENERATED_OUT_DIR)
# Minify extension static resources, copy it to
# build/[cache_slug]/extensions
EXTENSIONS_SRC_DIR = os.path.join('extensions', '')
EXTENSIONS_OUT_DIR = os.path.join(BUILD_DIR, 'extensions', '')
build_files(EXTENSIONS_SRC_DIR, EXTENSIONS_OUT_DIR)
TEMPLATES_HEAD_DIR = os.path.join('core', 'templates', 'dev', 'head', '')
TEMPLATES_OUT_DIR = os.path.join('core', 'templates', 'prod', 'head', '')
build_files(TEMPLATES_HEAD_DIR, TEMPLATES_OUT_DIR)
# Process core/templates/prod/head/css, copy it to build/[cache_slug]/css
CSS_SRC_DIR = os.path.join('core', 'templates', 'prod', 'head', 'css', '')
CSS_OUT_DIR = os.path.join(BUILD_DIR, 'css', '')
copy_files_source_to_target(CSS_SRC_DIR, CSS_OUT_DIR)
# Copy core/templates/prod/head/ to build/[cache_slug]/templates/head/
TEMPLATES_SRC_DIR = os.path.join('core', 'templates', 'prod', 'head', '')
TEMPLATES_OUT_DIR = os.path.join(BUILD_DIR, 'templates', 'head', '')
copy_files_source_to_target(TEMPLATES_SRC_DIR, TEMPLATES_OUT_DIR)
|
dsquareindia/scikit-learn
|
refs/heads/master
|
sklearn/neighbors/tests/test_approximate.py
|
30
|
"""
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
    # Accuracies should be non-decreasing as n_candidates increases.
print('accuracies:', accuracies)
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
    # Accuracies should be non-decreasing as n_estimators increases.
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features).reshape(1, -1)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
    # Checks whether returned distances are less than `radius`.
    # At least one point should be returned when `radius` is set to the
    # mean distance from the query point to the other points in the database.
    # Moreover, this test compares the radius neighbors of LSHForest
    # with those of `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)].reshape(1, -1)
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)].reshape(1, -1)
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
@ignore_warnings
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points,
random_state=42).fit(X)
# define a query aligned with the first axis
query = [[1., 0.]]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
    # The second point forms an angle of 45 degrees with the query vector
    assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
    # The third point is orthogonal to the query vector, hence at a distance of
    # exactly one:
assert_almost_equal(dists[2], 1)
    # The last point is almost collinear with, but opposite in sign to, the
    # query, therefore it has a cosine 'distance' very close to the maximum
    # possible value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
# If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
ignore_warnings(lshf.fit)(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
# Checks whether inserting array is consistent with fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
ignore_warnings(lshf.partial_fit)(X)
assert_array_equal(X, lshf._fit_X)
ignore_warnings(lshf.fit)(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
ignore_warnings(lshf.partial_fit)(X_partial_fit)
    # size of _input_array = n_samples + n_samples_partial_fit after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
    # size of original_indices_[0] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
    # size of trees_[1] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
ignore_warnings(lshf.fit)(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
ignore_warnings(lshf.fit)(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
|
allenlavoie/tensorflow
|
refs/heads/master
|
tensorflow/contrib/gan/python/eval/python/classifier_metrics_test.py
|
15
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TFGAN classifier_metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tarfile
import tempfile
from absl.testing import parameterized
import numpy as np
from scipy import linalg as scp_linalg
from google.protobuf import text_format
from tensorflow.contrib.gan.python.eval.python import classifier_metrics_impl as classifier_metrics
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
mock = test.mock
def _numpy_softmax(x):
e_x = np.exp(x - np.max(x, axis=1)[:, None])
return e_x / np.sum(e_x, axis=1)[:, None]
def _expected_inception_score(logits):
p = _numpy_softmax(logits)
q = np.expand_dims(np.mean(p, 0), 0)
per_example_logincscore = np.sum(p * (np.log(p) - np.log(q)), 1)
return np.exp(np.mean(per_example_logincscore))
def _expected_mean_only_fid(real_imgs, gen_imgs):
m = np.mean(real_imgs, axis=0)
m_v = np.mean(gen_imgs, axis=0)
mean = np.square(m - m_v).sum()
mofid = mean
return mofid
def _expected_diagonal_only_fid(real_imgs, gen_imgs):
m = np.mean(real_imgs, axis=0)
m_v = np.mean(gen_imgs, axis=0)
var = np.var(real_imgs, axis=0)
var_v = np.var(gen_imgs, axis=0)
sqcc = np.sqrt(var * var_v)
mean = (np.square(m - m_v)).sum()
trace = (var + var_v - 2 * sqcc).sum()
dofid = mean + trace
return dofid
def _expected_fid(real_imgs, gen_imgs):
m = np.mean(real_imgs, axis=0)
m_v = np.mean(gen_imgs, axis=0)
sigma = np.cov(real_imgs, rowvar=False)
sigma_v = np.cov(gen_imgs, rowvar=False)
sqcc = scp_linalg.sqrtm(np.dot(sigma, sigma_v))
mean = np.square(m - m_v).sum()
trace = np.trace(sigma + sigma_v - 2 * sqcc)
fid = mean + trace
return fid
def _expected_trace_sqrt_product(sigma, sigma_v):
return np.trace(scp_linalg.sqrtm(np.dot(sigma, sigma_v)))
# A dummy GraphDef string with the minimum number of Ops.
graphdef_string = """
node {
name: "Mul"
op: "Placeholder"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "shape"
value {
shape {
dim {
size: -1
}
dim {
size: 299
}
dim {
size: 299
}
dim {
size: 3
}
}
}
}
}
node {
name: "logits"
op: "Placeholder"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "shape"
value {
shape {
dim {
size: -1
}
dim {
size: 1001
}
}
}
}
}
node {
name: "pool_3"
op: "Placeholder"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "shape"
value {
shape {
dim {
size: -1
}
dim {
size: 2048
}
}
}
}
}
versions {
producer: 24
}
"""
def _get_dummy_graphdef():
dummy_graphdef = graph_pb2.GraphDef()
text_format.Merge(graphdef_string, dummy_graphdef)
return dummy_graphdef
def _run_with_mock(function, *args, **kwargs):
with mock.patch.object(
classifier_metrics,
'get_graph_def_from_url_tarball') as mock_tarball_getter:
mock_tarball_getter.return_value = _get_dummy_graphdef()
return function(*args, **kwargs)
class ClassifierMetricsTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('GraphDef', False),
('DefaultGraphDefFn', True))
def test_run_inception_graph(self, use_default_graph_def):
"""Test `run_inception` graph construction."""
batch_size = 7
img = array_ops.ones([batch_size, 299, 299, 3])
if use_default_graph_def:
logits = _run_with_mock(classifier_metrics.run_inception, img)
else:
logits = classifier_metrics.run_inception(img, _get_dummy_graphdef())
self.assertTrue(isinstance(logits, ops.Tensor))
logits.shape.assert_is_compatible_with([batch_size, 1001])
# Check that none of the model variables are trainable.
self.assertListEqual([], variables.trainable_variables())
@parameterized.named_parameters(
('GraphDef', False),
('DefaultGraphDefFn', True))
def test_run_inception_graph_pool_output(self, use_default_graph_def):
"""Test `run_inception` graph construction with pool output."""
batch_size = 3
img = array_ops.ones([batch_size, 299, 299, 3])
if use_default_graph_def:
pool = _run_with_mock(
classifier_metrics.run_inception,
img,
output_tensor=classifier_metrics.INCEPTION_FINAL_POOL)
else:
pool = classifier_metrics.run_inception(
img, _get_dummy_graphdef(),
output_tensor=classifier_metrics.INCEPTION_FINAL_POOL)
self.assertTrue(isinstance(pool, ops.Tensor))
pool.shape.assert_is_compatible_with([batch_size, 2048])
# Check that none of the model variables are trainable.
self.assertListEqual([], variables.trainable_variables())
def test_run_inception_multiple_outputs(self):
"""Test `run_inception` graph construction with multiple outputs."""
batch_size = 3
img = array_ops.ones([batch_size, 299, 299, 3])
logits, pool = _run_with_mock(
classifier_metrics.run_inception,
img,
output_tensor=[
classifier_metrics.INCEPTION_OUTPUT,
classifier_metrics.INCEPTION_FINAL_POOL
])
self.assertTrue(isinstance(logits, ops.Tensor))
self.assertTrue(isinstance(pool, ops.Tensor))
logits.shape.assert_is_compatible_with([batch_size, 1001])
pool.shape.assert_is_compatible_with([batch_size, 2048])
# Check that none of the model variables are trainable.
self.assertListEqual([], variables.trainable_variables())
def test_inception_score_graph(self):
"""Test `inception_score` graph construction."""
score = _run_with_mock(
classifier_metrics.inception_score,
array_ops.zeros([6, 299, 299, 3]),
num_batches=3)
self.assertTrue(isinstance(score, ops.Tensor))
score.shape.assert_has_rank(0)
# Check that none of the model variables are trainable.
self.assertListEqual([], variables.trainable_variables())
def test_frechet_inception_distance_graph(self):
"""Test `frechet_inception_distance` graph construction."""
img = array_ops.ones([7, 299, 299, 3])
distance = _run_with_mock(
classifier_metrics.frechet_inception_distance, img, img)
self.assertTrue(isinstance(distance, ops.Tensor))
distance.shape.assert_has_rank(0)
# Check that none of the model variables are trainable.
self.assertListEqual([], variables.trainable_variables())
def test_run_inception_multicall(self):
"""Test that `run_inception` can be called multiple times."""
for batch_size in (7, 3, 2):
img = array_ops.ones([batch_size, 299, 299, 3])
_run_with_mock(classifier_metrics.run_inception, img)
def test_invalid_input(self):
"""Test that functions properly fail on invalid input."""
with self.assertRaisesRegexp(ValueError, 'Shapes .* are incompatible'):
classifier_metrics.run_inception(array_ops.ones([7, 50, 50, 3]))
p = array_ops.zeros([8, 10])
p_logits = array_ops.zeros([8, 10])
q = array_ops.zeros([10])
with self.assertRaisesRegexp(ValueError, 'must be floating type'):
classifier_metrics._kl_divergence(
array_ops.zeros([8, 10], dtype=dtypes.int32), p_logits, q)
with self.assertRaisesRegexp(ValueError, 'must be floating type'):
classifier_metrics._kl_divergence(p,
array_ops.zeros(
[8, 10], dtype=dtypes.int32), q)
with self.assertRaisesRegexp(ValueError, 'must be floating type'):
classifier_metrics._kl_divergence(p, p_logits,
array_ops.zeros(
[10], dtype=dtypes.int32))
with self.assertRaisesRegexp(ValueError, 'must have rank 2'):
classifier_metrics._kl_divergence(array_ops.zeros([8]), p_logits, q)
with self.assertRaisesRegexp(ValueError, 'must have rank 2'):
classifier_metrics._kl_divergence(p, array_ops.zeros([8]), q)
with self.assertRaisesRegexp(ValueError, 'must have rank 1'):
classifier_metrics._kl_divergence(p, p_logits, array_ops.zeros([10, 8]))
def test_inception_score_value(self):
"""Test that `inception_score` gives the correct value."""
logits = np.array(
[np.array([1, 2] * 500 + [4]),
np.array([4, 5] * 500 + [6])])
unused_image = array_ops.zeros([2, 299, 299, 3])
incscore = _run_with_mock(classifier_metrics.inception_score, unused_image)
with self.test_session(use_gpu=True) as sess:
incscore_np = sess.run(incscore, {'concat:0': logits})
self.assertAllClose(_expected_inception_score(logits), incscore_np)
def test_mean_only_frechet_classifier_distance_value(self):
"""Test that `frechet_classifier_distance` gives the correct value."""
np.random.seed(0)
pool_real_a = np.float32(np.random.randn(256, 2048))
pool_gen_a = np.float32(np.random.randn(256, 2048))
tf_pool_real_a = array_ops.constant(pool_real_a)
tf_pool_gen_a = array_ops.constant(pool_gen_a)
mofid_op = classifier_metrics.mean_only_frechet_classifier_distance_from_activations( # pylint: disable=line-too-long
tf_pool_real_a, tf_pool_gen_a)
with self.test_session() as sess:
actual_mofid = sess.run(mofid_op)
expected_mofid = _expected_mean_only_fid(pool_real_a, pool_gen_a)
self.assertAllClose(expected_mofid, actual_mofid, 0.0001)
def test_diagonal_only_frechet_classifier_distance_value(self):
"""Test that `frechet_classifier_distance` gives the correct value."""
np.random.seed(0)
pool_real_a = np.float32(np.random.randn(256, 2048))
pool_gen_a = np.float32(np.random.randn(256, 2048))
tf_pool_real_a = array_ops.constant(pool_real_a)
tf_pool_gen_a = array_ops.constant(pool_gen_a)
dofid_op = classifier_metrics.diagonal_only_frechet_classifier_distance_from_activations( # pylint: disable=line-too-long
tf_pool_real_a, tf_pool_gen_a)
with self.test_session() as sess:
actual_dofid = sess.run(dofid_op)
expected_dofid = _expected_diagonal_only_fid(pool_real_a, pool_gen_a)
self.assertAllClose(expected_dofid, actual_dofid, 0.0001)
def test_frechet_classifier_distance_value(self):
"""Test that `frechet_classifier_distance` gives the correct value."""
np.random.seed(0)
# Make num_examples > num_features to ensure scipy's sqrtm function
# doesn't return a complex matrix.
test_pool_real_a = np.float32(np.random.randn(512, 256))
test_pool_gen_a = np.float32(np.random.randn(512, 256))
fid_op = _run_with_mock(
classifier_metrics.frechet_classifier_distance,
test_pool_real_a,
test_pool_gen_a,
classifier_fn=lambda x: x)
with self.test_session() as sess:
actual_fid = sess.run(fid_op)
expected_fid = _expected_fid(test_pool_real_a, test_pool_gen_a)
self.assertAllClose(expected_fid, actual_fid, 0.0001)
def test_frechet_classifier_distance_covariance(self):
"""Test that `frechet_classifier_distance` takes covariance into account."""
np.random.seed(0)
# Make num_examples > num_features to ensure scipy's sqrtm function
# doesn't return a complex matrix.
test_pool_reals, test_pool_gens = [], []
for i in range(1, 11, 2):
test_pool_reals.append(np.float32(np.random.randn(2048, 256) * i))
test_pool_gens.append(np.float32(np.random.randn(2048, 256) * i))
fid_ops = []
for i in range(len(test_pool_reals)):
fid_ops.append(_run_with_mock(
classifier_metrics.frechet_classifier_distance,
test_pool_reals[i],
test_pool_gens[i],
classifier_fn=lambda x: x))
fids = []
with self.test_session() as sess:
for fid_op in fid_ops:
fids.append(sess.run(fid_op))
# Check that the FIDs increase monotonically.
self.assertTrue(all(fid_a < fid_b for fid_a, fid_b in zip(fids, fids[1:])))
def test_trace_sqrt_product_value(self):
"""Test that `trace_sqrt_product` gives the correct value."""
np.random.seed(0)
# Make num_examples > num_features to ensure scipy's sqrtm function
# doesn't return a complex matrix.
test_pool_real_a = np.float32(np.random.randn(512, 256))
test_pool_gen_a = np.float32(np.random.randn(512, 256))
cov_real = np.cov(test_pool_real_a, rowvar=False)
cov_gen = np.cov(test_pool_gen_a, rowvar=False)
trace_sqrt_prod_op = _run_with_mock(classifier_metrics.trace_sqrt_product,
cov_real, cov_gen)
with self.test_session() as sess:
# trace_sqrt_product: tsp
actual_tsp = sess.run(trace_sqrt_prod_op)
expected_tsp = _expected_trace_sqrt_product(cov_real, cov_gen)
self.assertAllClose(actual_tsp, expected_tsp, 0.01)
def test_preprocess_image_graph(self):
"""Test `preprocess_image` graph construction."""
incorrectly_sized_image = array_ops.zeros([520, 240, 3])
correct_image = classifier_metrics.preprocess_image(
images=incorrectly_sized_image)
_run_with_mock(classifier_metrics.run_inception,
array_ops.expand_dims(correct_image, 0))
def test_get_graph_def_from_url_tarball(self):
"""Test `get_graph_def_from_url_tarball`."""
# Write dummy binary GraphDef to tempfile.
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
tmp_file.write(_get_dummy_graphdef().SerializeToString())
relative_path = os.path.relpath(tmp_file.name)
# Create gzip tarball.
tar_dir = tempfile.mkdtemp()
tar_filename = os.path.join(tar_dir, 'tmp.tar.gz')
with tarfile.open(tar_filename, 'w:gz') as tar:
tar.add(relative_path)
with mock.patch.object(classifier_metrics, 'urllib') as mock_urllib:
mock_urllib.request.urlretrieve.return_value = tar_filename, None
graph_def = classifier_metrics.get_graph_def_from_url_tarball(
'unused_url', relative_path)
self.assertIsInstance(graph_def, graph_pb2.GraphDef)
self.assertEqual(_get_dummy_graphdef(), graph_def)
if __name__ == '__main__':
test.main()
|
lablup/sorna-common
|
refs/heads/20.03
|
src/ai/backend/common/logging_utils.py
|
1
|
import logging
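# Small helpers that let loggers use str.format-style ("brace") placeholders
# with lazy formatting.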
class BraceMessage:
__slots__ = ('fmt', 'args')
def __init__(self, fmt, args):
self.fmt = fmt
self.args = args
def __str__(self):
return self.fmt.format(*self.args)
class BraceStyleAdapter(logging.LoggerAdapter):
def __init__(self, logger, extra=None):
super().__init__(logger, extra)
def log(self, level, msg, *args, **kwargs):
if self.isEnabledFor(level):
msg, kwargs = self.process(msg, kwargs)
self.logger._log(level, BraceMessage(msg, args), (), **kwargs)
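# Example usage (illustrative sketch, not part of the original module; the
# logger name and arguments below are arbitrary):
#   log = BraceStyleAdapter(logging.getLogger('my.module'))
#   log.info('connected to {} on port {}', host, port)
# Formatting is deferred to BraceMessage.__str__, so the braces are only
# interpolated when the log level is enabled.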
|
vinicius-alves/InternetBanking
|
refs/heads/master
|
app/models/transaction_vip.py
|
1
|
from django.db import models
from app.models import TransactionManager
from app.models.data_models import Transaction, Transaction_Type
from datetime import datetime
from pytz import timezone
class TransactionVip (TransactionManager):
def updateDebits(self):
        '''
        First asks the "Cashier" to check whether the user owes any fee for
        interest on a negative balance. If so, the "Cashier" deducts that amount
        right away. In that case, this method creates a new transaction with the
        amount to be debited and saves it to the database. It also updates the
        "owing_since" field of the account object.
        '''
debits = self.cashier.updateDebits()
if(debits !=0):
tax_transaction = Transaction()
type = Transaction_Type.objects.get(id=4)
tax_transaction.setType(type)
tax_transaction.setValue(debits)
tax_transaction.setAccount(self.transaction.getAccount())
datetime_now = datetime.now(timezone('America/Sao_Paulo'))
self.transaction.getAccount().setOwingSince(datetime_now)
self.transaction.getAccount().save()
tax_transaction.save()
def check_if_pay_debits(self):
        '''
        Checks whether, with the value of the transaction about to be made, a user
        who was in debt is no longer in debt. If so, sets the "owing_since" field
        of the user's account to null.
        '''
value = self.transaction.getValue()
account = self.transaction.getAccount()
balance = account.getBalance()
if(balance<0 and value>abs(balance)):
account.setOwingSince(None)
account.save()
return True
return False
def check_if_did_debits(self):
        '''
        Checks whether, with the value of the transaction about to be made, the user
        has gone into debt. To do so, it needs to access the account to get the balance.
        If the user goes into debt, updates the account's "owing_since" field to the
        current time.
        '''
value = self.transaction.getValue()
account = self.transaction.getAccount()
balance = account.getBalance()
if(value>balance):
datetime_now = datetime.now(timezone('America/Sao_Paulo'))
account.setOwingSince(datetime_now)
account.save()
def withdraw (self):
"""
Atualiza os juros caso o cliente seja devedor, através do método "updateDebits()".
Verifica se, com esta transação o cliente se torna devedor.
Após, recebe um valor através do objeto de transação e repassa para o "Cashier"
a função de decrementar este valor.
Em seguida, requisita que o "Cashier" salve as atualizações na conta.
"""
value = self.transaction.getValue()
self.updateDebits()
self.check_if_did_debits()
self.cashier.decrease(amount=value)
self.cashier.save()
def deposit (self):
"""
Atualiza os juros caso o cliente seja devedor, através do método "updateDebits()".
Verifica se o cliente era devedor e se com esta transação deixou de ser.
Recebe um valor através do objeto de transação e repassa para o "Cashier"
a função de incrementar este valor.
Em seguida, requisita que o "Cashier" salve as atualizações na conta.
"""
self.updateDebits()
self.check_if_pay_debits()
value = self.transaction.getValue()
self.cashier.increase(amount=value)
self.cashier.save()
def doTransfer (self):
"""
Updates interest charges if the client is in debt, via "updateDebits()".
Checks whether this transaction puts the client into debt.
Then takes the amount from the transaction object and delegates to the "Cashier"
the job of decreasing the balance by that amount.
Runs "payTransferTax()" to create another transaction and record the fees
charged on the transfer.
Finally, asks the "Cashier" to save the updates to the account.
"""
self.updateDebits()
self.check_if_did_debits()
value = self.transaction.getValue()
self.cashier.decrease(amount=value)
self.payTransferTax()
self.cashier.save()
def receiveTransfer (self):
"""
Updates interest charges if the client is in debt, via "updateDebits()".
Checks whether the client was in debt and, with this transaction, no longer is.
Takes the amount from the transaction object and delegates to the "Cashier"
the job of increasing the balance by that amount.
Finally, asks the "Cashier" to save the updates to the account.
"""
self.updateDebits()
self.check_if_pay_debits()
value = self.transaction.getValue()
self.cashier.increase(amount=value)
self.cashier.save()
def payTransferTax (self):
"""
Creates a new transaction of type "bank transfer fee payment", then tells the
"Cashier" to deduct a fee of 8% of the value of the account's latest transaction,
and finally saves the transaction to the database.
"""
tax_transaction = Transaction()
type_transaction = Transaction_Type.objects.get(id=6)
tax_transaction.setType(type_transaction)
tax = self.transaction.getValue()*0.08
tax_transaction.setValue(tax)
self.cashier.decrease(amount=tax)
tax_transaction.setAccount(self.transaction.getAccount())
tax_transaction.save()
def payExcerpt (self):
'''
Not used, since there is no fee for issuing an account statement.
'''
self.updateDebits()
self.check_if_did_debits()
raise NotImplementedError
def payHelp (self):
"""
Updates interest charges if the client is in debt, via "updateDebits()".
Checks whether this transaction puts the client into debt.
Then delegates to the "Cashier" the job of deducting the fixed amount of R$ 50.00.
Finally, asks the "Cashier" to save the updates to the account.
"""
self.updateDebits()
self.check_if_did_debits()
self.cashier.decrease(amount=50)
self.cashier.save()
class Meta:
managed = False
app_label = 'app'
|
pengphei/cinemaman
|
refs/heads/master
|
ui/MovieListView.py
|
1
|
# -*- coding: utf-8 -*-
import Tkinter as tk
import ttk
from CMApp import *
class CMMovieListView(ttk.Frame):
def __init__(self, root, parent, width_):
ttk.Frame.__init__(self, parent, class_="CMMovieListView", width=width_)
self.root = root
self.parent = parent
self.width = width_
self._setup_widgets()
return
def _setup_widgets(self):
self.movieStrVar = tk.StringVar()
self.movieTitle = ttk.Label(self, text = u"电影列表", width=self.width)  # u"电影列表" = "Movie list"
self.movieTitle.pack(fill=tk.X, side=tk.TOP)
self.movieList = tk.Listbox(self, width=self.width, listvariable=self.movieStrVar, selectmode=tk.SINGLE, bg='light green')
self.movieList.pack(fill=tk.BOTH, side=tk.TOP)
# movie list key bindings
self.movieList.bind('<ButtonRelease-1>', self.movie_single_click)
self.movieList.bind('<Double-ButtonRelease-1>', self.movie_double_click)
self.movieToolsFrame = ttk.Frame(self, width=self.width)
self.movieToolsFrame.pack(fill=tk.Y, side=tk.BOTTOM)
self.movieToolAdd = ttk.Button(self.movieToolsFrame, text = '添加', command=self.movie_add)  # '添加' = "Add"
self.movieToolAdd.pack(fill=tk.Y, side=tk.LEFT)
self.movieToolDel = ttk.Button(self.movieToolsFrame, text = '删除', command=self.movie_del)  # '删除' = "Delete"
self.movieToolDel.pack(fill=tk.Y, side=tk.LEFT)
self.movieToolEdit = ttk.Button(self.movieToolsFrame, text = '编辑', command=self.movie_edit)  # '编辑' = "Edit"
self.movieToolEdit.pack(fill=tk.Y, side=tk.LEFT)
self.movie_list_update()
return
def movie_list_update(self):
# add movie
movies = []
for idx in range(len(gInfo.movie_list)):
movies.append(gInfo.movie_list[idx].name)
self.movieStrVar.set(tuple(movies))
return
def movie_single_click(self, event):
idxs = self.movieList.curselection()
if(len(idxs) == 0):
return
gInfo.movie_focus = idxs[0]
self.focus_movie = gInfo.movie_list[gInfo.movie_focus]
print("movie single click")
print(self.focus_movie.name)
return
def movie_double_click(self, event):
idxs = self.movieList.curselection()
if(len(idxs) == 0):
return
gInfo.movie_focus = idxs[0]
self.focus_movie = gInfo.movie_list[gInfo.movie_focus]
print("mocie double click")
print(self.focus_movie.name)
self.movie_edit()
return
def movie_add(self):
dialog = MovieDialog(self.root)
dialog.open_add(self)
return
def movie_del(self):
if(len(gInfo.movie_list) == 0 or gInfo.movie_focus == -1):
return
dialog = MovieDialog(self.root)
dialog.open_del(self, self.focus_movie.name)
return
def movie_edit(self):
if(len(gInfo.movie_list) == 0 or gInfo.movie_focus == -1):
return
dialog = MovieDialog(self.root)
dialog.open_edit(self, self.focus_movie)
return
def movie_add_confirm(self, movie):
movie.id = gInfo.db_movie.add(gInfo.db.conn, movie)
gInfo.movie_list.append(movie)
self.movie_list_update()
return
def movie_edit_confirm(self, movie):
if(len(gInfo.movie_list) == 0 or gInfo.movie_focus == -1):
return
movie_old = gInfo.movie_list[gInfo.movie_focus]
movie.id = movie_old.id
gInfo.db_movie.edit(gInfo.db.conn, movie)
gInfo.movie_list[gInfo.movie_focus] = movie
self.movie_list_update()
return
def movie_del_confirm(self):
if(len(gInfo.movie_list) == 0 or gInfo.movie_focus == -1):
return
movie_del = gInfo.movie_list[gInfo.movie_focus]
gInfo.db_movie.delete(gInfo.db.conn, movie_del)
gInfo.movie_list.pop(gInfo.movie_focus)
last = len(gInfo.movie_list) - 1
if(gInfo.movie_focus > last):
gInfo.movie_focus = last
self.movie_list_update()
return
|
xrmx/django
|
refs/heads/master
|
django/contrib/gis/gdal/__init__.py
|
130
|
"""
This module houses ctypes interfaces for GDAL objects. The following GDAL
objects are supported:
CoordTransform: Used for coordinate transformations from one spatial
reference system to another.
Driver: Wraps an OGR data source driver.
DataSource: Wrapper for the OGR data source object, supports
OGR-supported data sources.
Envelope: A ctypes structure for bounding boxes (GDAL library
not required).
OGRGeometry: Object for accessing OGR Geometry functionality.
OGRGeomType: A class for representing the different OGR Geometry
types (GDAL library not required).
SpatialReference: Represents OSR Spatial Reference objects.
The GDAL library will be imported from the system path using the default
library name for the current OS. The default library path may be overridden
by setting `GDAL_LIBRARY_PATH` in your settings with the path to the GDAL C
library on your system.
GDAL links to a large number of external libraries that consume RAM when
loaded. Thus, it may be desirable to disable GDAL on systems with limited
RAM resources -- this may be accomplished by setting `GDAL_LIBRARY_PATH`
to a non-existent file location (e.g., `GDAL_LIBRARY_PATH='/null/path'`;
setting to None/False/'' will not work as a string must be given).
"""
from django.contrib.gis.gdal.error import (check_err, GDALException,
OGRException, OGRIndexError, SRSException) # NOQA
from django.contrib.gis.gdal.geomtype import OGRGeomType # NOQA
__all__ = [
'check_err', 'GDALException', 'OGRException', 'OGRIndexError',
'SRSException', 'OGRGeomType', 'HAS_GDAL',
]
# Attempting to import objects that depend on the GDAL library. The
# HAS_GDAL flag will be set to True if the library is present on
# the system.
try:
from django.contrib.gis.gdal.driver import Driver # NOQA
from django.contrib.gis.gdal.datasource import DataSource # NOQA
from django.contrib.gis.gdal.libgdal import gdal_version, gdal_full_version, GDAL_VERSION # NOQA
from django.contrib.gis.gdal.raster.source import GDALRaster # NOQA
from django.contrib.gis.gdal.srs import SpatialReference, CoordTransform # NOQA
from django.contrib.gis.gdal.geometries import OGRGeometry # NOQA
HAS_GDAL = True
__all__ += [
'Driver', 'DataSource', 'gdal_version', 'gdal_full_version',
'GDAL_VERSION', 'SpatialReference', 'CoordTransform', 'OGRGeometry',
]
except GDALException:
HAS_GDAL = False
try:
from django.contrib.gis.gdal.envelope import Envelope
__all__ += ['Envelope']
except ImportError:
# No ctypes, but don't raise an exception.
pass
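# Illustrative sketch (not part of the original module): the defensive usage
# pattern implied by the docstring above -- consult HAS_GDAL before touching
# GDAL-backed objects so code degrades gracefully when the library is absent.
#
# from django.contrib.gis import gdal
# if gdal.HAS_GDAL:
#     srs = gdal.SpatialReference('WGS84')   # EPSG:4326
# else:
#     srs = None  # fall back to functionality that does not need GDAL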
|
scorpilix/Golemtest
|
refs/heads/develop
|
tests/golem/core/test_fileencrypt.py
|
2
|
import os
import random
from golem.core.fileencrypt import FileHelper, FileEncryptor, AESFileEncryptor
from golem.resource.dirmanager import DirManager
from golem.tools.testdirfixture import TestDirFixture
class TestAESFileEncryptor(TestDirFixture):
""" Test encryption using AESFileEncryptor """
def setUp(self):
TestDirFixture.setUp(self)
self.dir_manager = DirManager(self.path)
self.res_dir = self.dir_manager.get_task_temporary_dir('test_task')
self.test_file_path = os.path.join(self.res_dir, 'test_file')
self.enc_file_path = os.path.join(self.res_dir, 'test_file.enc')
with open(self.test_file_path, 'wb') as f:
for i in xrange(0, 100):
f.write(bytearray(random.getrandbits(8) for _ in xrange(32)))
def test_encrypt(self):
""" Test encryption procedure """
secret = FileEncryptor.gen_secret(10, 20)
if os.path.exists(self.enc_file_path):
os.remove(self.enc_file_path)
AESFileEncryptor.encrypt(self.test_file_path,
self.enc_file_path,
secret)
self.assertTrue(os.path.exists(self.enc_file_path))
with open(self.enc_file_path, 'rb') as f:
encrypted = f.read()
self.assertEqual(
len(encrypted) % AESFileEncryptor.block_size, 0,
"Incorrect ciphertext size: {}. Should be multiple of {}".format(len(encrypted),
AESFileEncryptor.block_size))
def test_decrypt(self):
""" Test decryption procedure """
secret = FileEncryptor.gen_secret(10, 20)
decrypted_path = self.test_file_path + ".dec"
if os.path.exists(self.enc_file_path):
os.remove(self.enc_file_path)
AESFileEncryptor.encrypt(self.test_file_path,
self.enc_file_path,
secret)
AESFileEncryptor.decrypt(self.enc_file_path,
decrypted_path,
secret)
self.assertEqual(os.path.getsize(self.test_file_path),
os.path.getsize(decrypted_path))
with open(self.test_file_path) as f1, open(decrypted_path) as f2:
while True:
chunk1 = f1.read(32)
chunk2 = f2.read(32)
if chunk1 != chunk2:
raise ValueError("Invalid decrypted file chunk")
elif not chunk1 and not chunk2:
break
AESFileEncryptor.decrypt(self.enc_file_path,
decrypted_path,
secret + "0")
decrypted = True
if os.path.getsize(self.test_file_path) != os.path.getsize(decrypted_path):
decrypted = False
else:
with open(self.test_file_path) as f1, open(decrypted_path) as f2:
while True:
chunk1 = f1.read(32)
chunk2 = f2.read(32)
if chunk1 != chunk2:
decrypted = False
break
elif not chunk1 and not chunk2:
break
self.assertFalse(decrypted)
def test_get_key_and_iv(self):
""" Test helper methods: gen_salt and get_key_and_iv """
salt = AESFileEncryptor.gen_salt(AESFileEncryptor.block_size)
self.assertEqual(len(salt), AESFileEncryptor.block_size - AESFileEncryptor.salt_prefix_len)
secret = FileEncryptor.gen_secret(10, 20)
self.assertGreaterEqual(len(secret), 10)
self.assertLessEqual(len(secret), 20)
key_len = 32
iv_len = AESFileEncryptor.block_size
key, iv = AESFileEncryptor.get_key_and_iv(secret, salt, key_len, iv_len)
self.assertEqual(len(key), key_len)
self.assertEqual(len(iv), iv_len)
class TestFileHelper(TestDirFixture):
""" Tests for FileHelper class """
def setUp(self):
TestDirFixture.setUp(self)
self.dir_manager = DirManager(self.path)
self.res_dir = self.dir_manager.get_task_temporary_dir('test_task')
self.test_file_path = os.path.join(self.res_dir, 'test_file')
open(self.test_file_path, 'w').close()
def test_file_helper(self):
""" Test opening file with FileHelper """
mode = 'r'
# Test opening with file path
with FileHelper(self.test_file_path, mode) as f:
self.assertIsInstance(f, file)
self.assertEqual(f.mode, mode)
# Test opening with file
with open(self.test_file_path, mode) as file_:
with FileHelper(file_, mode) as f:
self.assertIsInstance(f, file)
self.assertEqual(f.mode, mode)
|
aerophile/django
|
refs/heads/master
|
django/db/backends/sqlite3/utils.py
|
108
|
from django.conf import settings
from django.utils import timezone
from django.utils.dateparse import parse_datetime
def parse_datetime_with_timezone_support(value):
dt = parse_datetime(value)
# Confirm that dt is naive before overwriting its tzinfo.
if dt is not None and settings.USE_TZ and timezone.is_naive(dt):
dt = dt.replace(tzinfo=timezone.utc)
return dt
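# Illustrative sketch (not part of the original module): with settings.USE_TZ
# enabled, a naive datetime string coming back from SQLite gains UTC tzinfo.
# Assumes Django settings are already configured.
#
# parse_datetime_with_timezone_support('2015-06-01 12:30:00')
# -> datetime.datetime(2015, 6, 1, 12, 30, tzinfo=<UTC>)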
|
GbalsaC/bitnamiP
|
refs/heads/master
|
edx-ora2/docs/en_us/course_authors/source/conf.py
|
48
|
# -*- coding: utf-8 -*-
#
import sys, os
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
sys.path.append(os.path.abspath('../../../'))
sys.path.append(os.path.abspath('../../'))
#from docs.shared.conf import *
sys.path.insert(0, os.path.abspath('.'))
master_doc = 'index'
# Add any paths that contain templates here, relative to this directory.
#templates_path.append('source/_templates')
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path.append('source/_static')
# General information about the project.
project = u'Creating a Peer Assessment'
copyright = u'2014, edX'
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
|
111pontes/ydk-py
|
refs/heads/master
|
cisco-ios-xe/ydk/models/cisco_ios_xe/ATM_TC_MIB.py
|
1
|
""" ATM_TC_MIB
This MIB Module provides Textual Conventions
and OBJECT\-IDENTITY Objects to be used by
ATM systems.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
from ydk.models.ietf.ietf_yang_smiv2 import ObjectIdentityIdentity
class AtmconncasttypeEnum(Enum):
"""
AtmconncasttypeEnum
The type of topology of a connection (point\-
to\-point, point\-to\-multipoint). In the case
of point\-to\-multipoint, the orientation of
this VPL or VCL in the connection.
On a host\:
\- p2mpRoot indicates that the host
is the root of the p2mp connection.
\- p2mpLeaf indicates that the host
is a leaf of the p2mp connection.
On a switch interface\:
\- p2mpRoot indicates that cells received
by the switching fabric from the interface
are from the root of the p2mp connection.
\- p2mpLeaf indicates that cells transmitted
to the interface from the switching fabric
are to the leaf of the p2mp connection.
.. data:: p2p = 1
.. data:: p2mpRoot = 2
.. data:: p2mpLeaf = 3
"""
p2p = 1
p2mpRoot = 2
p2mpLeaf = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
return meta._meta_table['AtmconncasttypeEnum']
class AtmconnkindEnum(Enum):
"""
AtmconnkindEnum
The type of call control used for an ATM
connection at a particular interface. The use
is as follows\:
pvc(1)
Virtual link of a PVC. Should not be
used for an PVC/SVC (i.e., Soft PVC)
crossconnect.
svcIncoming(2)
Virtual link established after a
received signaling request to setup
an SVC.
svcOutgoing(3)
Virtual link established after a
transmitted or forwarded signaling
request to setup an SVC.
spvcInitiator(4)
Virtual link at the PVC side of an
SVC/PVC crossconnect, where the
switch is the initiator of the Soft PVC
setup.
spvcTarget(5)
Virtual link at the PVC side of an
SVC/PVC crossconnect, where the
switch is the target of the Soft PVC
setup.
For PVCs, a pvc virtual link is always cross\-
connected to a pvc virtual link.
For SVCs, an svcIncoming virtual link is always cross\-
connected to an svcOutgoing virtual link.
For Soft PVCs, an spvcInitiator is either cross\-connected to
an svcOutgoing or an spvcTarget, and an spvcTarget is either
cross\-connected to an svcIncoming or an spvcInitiator.
.. data:: pvc = 1
.. data:: svcIncoming = 2
.. data:: svcOutgoing = 3
.. data:: spvcInitiator = 4
.. data:: spvcTarget = 5
"""
pvc = 1
svcIncoming = 2
svcOutgoing = 3
spvcInitiator = 4
spvcTarget = 5
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
return meta._meta_table['AtmconnkindEnum']
class AtminterfacetypeEnum(Enum):
"""
AtminterfacetypeEnum
The connection setup procedures used for the
identified interface.
Other\: Connection setup procedures other than
those listed below.
Auto\-configuration\:
Indicates that the connection setup
procedures are to be determined dynamically,
or that determination has not yet been
completed. One such mechanism is via ATM
Forum ILMI auto\-configuration procedures.
ITU\-T DSS2\:
\- ITU\-T Recommendation Q.2931, Broadband
Integrated Service Digital Network (B\-ISDN)
Digital Subscriber Signalling System No.2
(DSS2) User\-Network Interface (UNI) Layer 3
Specification for Basic Call/Connection
Control (September 1994)
\- ITU\-T Draft Recommendation Q.2961,
B\-ISDN DSS 2 Support of Additional Traffic
Parameters (May 1995)
\- ITU\-T Draft Recommendation Q.2971,
B\-ISDN DSS 2 User Network Interface Layer 3
Specification for Point\-to\-multipoint
Call/connection Control (May 1995)
ATM Forum UNI 3.0\:
ATM Forum, ATM User\-Network Interface,
Version 3.0 (UNI 3.0) Specification,
(1994).
ATM Forum UNI 3.1\:
ATM Forum, ATM User\-Network Interface,
Version 3.1 (UNI 3.1) Specification,
(November 1994).
ATM Forum UNI Signalling 4.0\:
ATM Forum, ATM User\-Network Interface (UNI)
Signalling Specification Version 4.0,
af\-sig\-0061.000 (June 1996).
ATM Forum IISP (based on UNI 3.0 or UNI 3.1) \:
Interim Inter\-switch Signaling Protocol
(IISP) Specification, Version 1.0,
af\-pnni\-0026.000, (December 1994).
ATM Forum PNNI 1.0 \:
ATM Forum, Private Network\-Network Interface
Specification, Version 1.0, af\-pnni\-0055.000,
(March 1996).
ATM Forum B\-ICI\:
ATM Forum, B\-ICI Specification, Version 2.0,
af\-bici\-0013.002, (November 1995).
ATM Forum UNI PVC Only\:
An ATM Forum compliant UNI with the
signalling disabled.
ATM Forum NNI PVC Only\:
An ATM Forum compliant NNI with the
signalling disabled.
.. data:: other = 1
.. data:: autoConfig = 2
.. data:: ituDss2 = 3
.. data:: atmfUni3Dot0 = 4
.. data:: atmfUni3Dot1 = 5
.. data:: atmfUni4Dot0 = 6
.. data:: atmfIispUni3Dot0 = 7
.. data:: atmfIispUni3Dot1 = 8
.. data:: atmfIispUni4Dot0 = 9
.. data:: atmfPnni1Dot0 = 10
.. data:: atmfBici2Dot0 = 11
.. data:: atmfUniPvcOnly = 12
.. data:: atmfNniPvcOnly = 13
"""
other = 1
autoConfig = 2
ituDss2 = 3
atmfUni3Dot0 = 4
atmfUni3Dot1 = 5
atmfUni4Dot0 = 6
atmfIispUni3Dot0 = 7
atmfIispUni3Dot1 = 8
atmfIispUni4Dot0 = 9
atmfPnni1Dot0 = 10
atmfBici2Dot0 = 11
atmfUniPvcOnly = 12
atmfNniPvcOnly = 13
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
return meta._meta_table['AtminterfacetypeEnum']
class AtmservicecategoryEnum(Enum):
"""
AtmservicecategoryEnum
The service category for a connection.
.. data:: other = 1
.. data:: cbr = 2
.. data:: rtVbr = 3
.. data:: nrtVbr = 4
.. data:: abr = 5
.. data:: ubr = 6
"""
other = 1
cbr = 2
rtVbr = 3
nrtVbr = 4
abr = 5
ubr = 6
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
return meta._meta_table['AtmservicecategoryEnum']
class AtmvorxadminstatusEnum(Enum):
"""
AtmvorxadminstatusEnum
The value determines the desired administrative
status of a virtual link or cross\-connect. The up
and down states indicate that the traffic flow is
enabled or disabled respectively on the virtual
link or cross\-connect.
.. data:: up = 1
.. data:: down = 2
"""
up = 1
down = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
return meta._meta_table['AtmvorxadminstatusEnum']
class AtmvorxoperstatusEnum(Enum):
"""
AtmvorxoperstatusEnum
The value determines the operational status of a
virtual link or cross\-connect. The up and down
states indicate that the traffic flow is enabled
or disabled respectively on the virtual link or
cross\-connect. The unknown state indicates that
the state of it cannot be determined. The state
will be down or unknown if the supporting ATM
interface(s) is down or unknown respectively.
.. data:: up = 1
.. data:: down = 2
.. data:: unknown = 3
"""
up = 1
down = 2
unknown = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
return meta._meta_table['AtmvorxoperstatusEnum']
class AtmclptransparentscrIdentity(ObjectIdentityIdentity):
"""
This traffic descriptor type is for the CLP\-
transparent model with Sustained Cell Rate.
The use of the parameter vector for this type\:
Parameter 1\: peak cell rate in cells/second
for CLP=0+1 traffic
Parameter 2\: sustainable cell rate in cells/second
for CLP=0+1 traffic
Parameter 3\: maximum burst size in cells
Parameter 4\: CDVT in tenths of microseconds
Parameter 5\: not used.
This traffic descriptor type is applicable to
connections following the VBR.1 conformance
definition.
Connections specifying this traffic descriptor
type will be rejected at UNI 3.0 or UNI 3.1
interfaces. For a similar traffic descriptor
type that can be accepted at UNI 3.0 and
UNI 3.1 interfaces, see atmNoClpScr.
"""
_prefix = 'ATM-TC-MIB'
_revision = '1998-10-19'
def __init__(self):
ObjectIdentityIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
return meta._meta_table['AtmclptransparentscrIdentity']['meta_info']
class AtmclpnotaggingmcrIdentity(ObjectIdentityIdentity):
"""
This traffic descriptor type is for CLP with
Minimum Cell Rate and no tagging. The use of
the parameter vector for this type\:
Parameter 1\: peak cell rate in cells/second
for CLP=0+1 traffic
Parameter 2\: CDVT in tenths of microseconds
Parameter 3\: minimum cell rate in cells/second
Parameter 4\: unused
Parameter 5\: unused.
"""
_prefix = 'ATM-TC-MIB'
_revision = '1998-10-19'
def __init__(self):
ObjectIdentityIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
return meta._meta_table['AtmclpnotaggingmcrIdentity']['meta_info']
class AtmnoclpnoscrcdvtIdentity(ObjectIdentityIdentity):
"""
This traffic descriptor type is for no CLP
and no Sustained Cell Rate. The use of the
parameter vector for this type\:
Parameter 1\: peak cell rate in cells/second
for CLP=0+1 traffic
Parameter 2\: CDVT in tenths of microseconds
Parameter 3\: not used
Parameter 4\: not used
Parameter 5\: not used.
This traffic descriptor type is applicable to
CBR connections following the UNI 3.0/3.1
conformance definition for PCR CLP=0+1.
These CBR connections differ from CBR.1
connections in that the CLR objective
applies only to the CLP=0 cell flow.
This traffic descriptor type is also
applicable to connections following the UBR.1
conformance definition.
"""
_prefix = 'ATM-TC-MIB'
_revision = '1998-10-19'
def __init__(self):
ObjectIdentityIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
return meta._meta_table['AtmnoclpnoscrcdvtIdentity']['meta_info']
class AtmclptaggingscrcdvtIdentity(ObjectIdentityIdentity):
"""
This traffic descriptor type is for CLP with
tagging and Sustained Cell Rate. The use of
the parameter vector for this type\:
Parameter 1\: peak cell rate in cells/second
for CLP=0+1 traffic
Parameter 2\: sustainable cell rate in cells/second
for CLP=0 traffic, excess tagged as
CLP=1
Parameter 3\: maximum burst size in cells
Parameter 4\: CDVT in tenths of microseconds
Parameter 5\: not used.
This traffic descriptor type is applicable to
connections following the VBR.3 conformance
definition.
"""
_prefix = 'ATM-TC-MIB'
_revision = '1998-10-19'
def __init__(self):
ObjectIdentityIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
return meta._meta_table['AtmclptaggingscrcdvtIdentity']['meta_info']
class AtmclpnotaggingscrIdentity(ObjectIdentityIdentity):
"""
This traffic descriptor type is for CLP with
Sustained Cell Rate and no tagging. The use
of the parameter vector for this type\:
Parameter 1\: peak cell rate in cells/second
for CLP=0+1 traffic
Parameter 2\: sustainable cell rate in cells/second
for CLP=0 traffic
Parameter 3\: maximum burst size in cells
Parameter 4\: not used
Parameter 5\: not used.
"""
_prefix = 'ATM-TC-MIB'
_revision = '1998-10-19'
def __init__(self):
ObjectIdentityIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
return meta._meta_table['AtmclpnotaggingscrIdentity']['meta_info']
class AtmnoclpscrcdvtIdentity(ObjectIdentityIdentity):
"""
This traffic descriptor type is for no CLP
with Sustained Cell Rate. The use of the
parameter vector for this type\:
Parameter 1\: peak cell rate in cells/second
for CLP=0+1 traffic
Parameter 2\: sustainable cell rate in cells/second
for CLP=0+1 traffic
Parameter 3\: maximum burst size in cells
Parameter 4\: CDVT in tenths of microseconds
Parameter 5\: not used.
This traffic descriptor type is applicable
to VBR connections following the UNI 3.0/3.1
conformance definition for PCR CLP=0+1 and
SCR CLP=0+1. These VBR connections
differ from VBR.1 connections in that
the CLR objective applies only to the CLP=0
cell flow.
"""
_prefix = 'ATM-TC-MIB'
_revision = '1998-10-19'
def __init__(self):
ObjectIdentityIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
return meta._meta_table['AtmnoclpscrcdvtIdentity']['meta_info']
class AtmnotrafficdescriptorIdentity(ObjectIdentityIdentity):
"""
This identifies the no ATM traffic
descriptor type. Parameters 1, 2, 3, 4,
and 5 are not used. This traffic descriptor
type can be used for best effort traffic.
"""
_prefix = 'ATM-TC-MIB'
_revision = '1998-10-19'
def __init__(self):
ObjectIdentityIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
return meta._meta_table['AtmnotrafficdescriptorIdentity']['meta_info']
class AtmclptransparentnoscrIdentity(ObjectIdentityIdentity):
"""
This traffic descriptor type is for the CLP\-
transparent model and no Sustained Cell Rate.
The use of the parameter vector for this type\:
Parameter 1\: peak cell rate in cells/second
for CLP=0+1 traffic
Parameter 2\: CDVT in tenths of microseconds
Parameter 3\: not used
Parameter 4\: not used
Parameter 5\: not used.
This traffic descriptor type is applicable to
connections following the CBR.1 conformance
definition.
Connections specifying this traffic descriptor
type will be rejected at UNI 3.0 or UNI 3.1
interfaces. For a similar traffic descriptor
type that can be accepted at UNI 3.0 and
UNI 3.1 interfaces, see atmNoClpNoScr.
"""
_prefix = 'ATM-TC-MIB'
_revision = '1998-10-19'
def __init__(self):
ObjectIdentityIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
return meta._meta_table['AtmclptransparentnoscrIdentity']['meta_info']
class AtmclptaggingscrIdentity(ObjectIdentityIdentity):
"""
This traffic descriptor type is for CLP with
tagging and Sustained Cell Rate. The use of
the parameter vector for this type\:
Parameter 1\: peak cell rate in cells/second
for CLP=0+1 traffic
Parameter 2\: sustainable cell rate in cells/second
for CLP=0 traffic, excess tagged as
CLP=1
Parameter 3\: maximum burst size in cells
Parameter 4\: not used
Parameter 5\: not used.
"""
_prefix = 'ATM-TC-MIB'
_revision = '1998-10-19'
def __init__(self):
ObjectIdentityIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
return meta._meta_table['AtmclptaggingscrIdentity']['meta_info']
class AtmnoclpnoscrIdentity(ObjectIdentityIdentity):
"""
This traffic descriptor type is for no CLP
and no Sustained Cell Rate. The use of the
parameter vector for this type\:
Parameter 1\: peak cell rate in cells/second
for CLP=0+1 traffic
Parameter 2\: not used
Parameter 3\: not used
Parameter 4\: not used
Parameter 5\: not used.
"""
_prefix = 'ATM-TC-MIB'
_revision = '1998-10-19'
def __init__(self):
ObjectIdentityIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
return meta._meta_table['AtmnoclpnoscrIdentity']['meta_info']
class AtmnoclpscrIdentity(ObjectIdentityIdentity):
"""
This traffic descriptor type is for no CLP
with Sustained Cell Rate. The use of the
parameter vector for this type\:
Parameter 1\: peak cell rate in cells/second
for CLP=0+1 traffic
Parameter 2\: sustainable cell rate in cells/second
for CLP=0+1 traffic
Parameter 3\: maximum burst size in cells
Parameter 4\: not used
Parameter 5\: not used.
"""
_prefix = 'ATM-TC-MIB'
_revision = '1998-10-19'
def __init__(self):
ObjectIdentityIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
return meta._meta_table['AtmnoclpscrIdentity']['meta_info']
class AtmclpnotaggingnoscrIdentity(ObjectIdentityIdentity):
"""
This traffic descriptor is for CLP without
tagging and no Sustained Cell Rate. The use
of the parameter vector for this type\:
Parameter 1\: peak cell rate in cells/second
for CLP=0+1 traffic
Parameter 2\: peak cell rate in cells/second
for CLP=0 traffic
Parameter 3\: not used
Parameter 4\: not used
Parameter 5\: not used.
"""
_prefix = 'ATM-TC-MIB'
_revision = '1998-10-19'
def __init__(self):
ObjectIdentityIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
return meta._meta_table['AtmclpnotaggingnoscrIdentity']['meta_info']
class AtmclptaggingnoscrIdentity(ObjectIdentityIdentity):
"""
This traffic descriptor is for CLP with
tagging and no Sustained Cell Rate. The use
of the parameter vector for this type\:
Parameter 1\: peak cell rate in cells/second
for CLP=0+1 traffic
Parameter 2\: peak cell rate in cells/second
for CLP=0 traffic, excess
tagged as CLP=1
Parameter 3\: not used
Parameter 4\: not used
Parameter 5\: not used.
"""
_prefix = 'ATM-TC-MIB'
_revision = '1998-10-19'
def __init__(self):
ObjectIdentityIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
return meta._meta_table['AtmclptaggingnoscrIdentity']['meta_info']
class AtmclpnotaggingscrcdvtIdentity(ObjectIdentityIdentity):
"""
This traffic descriptor type is for CLP with
Sustained Cell Rate and no tagging. The use
of the parameter vector for this type\:
Parameter 1\: peak cell rate in cells/second
for CLP=0+1 traffic
Parameter 2\: sustainable cell rate in cells/second
for CLP=0 traffic
Parameter 3\: maximum burst size in cells
Parameter 4\: CDVT in tenths of microseconds
Parameter 5\: not used.
This traffic descriptor type is applicable to
connections following the VBR.2 conformance
definition.
"""
_prefix = 'ATM-TC-MIB'
_revision = '1998-10-19'
def __init__(self):
ObjectIdentityIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
return meta._meta_table['AtmclpnotaggingscrcdvtIdentity']['meta_info']
class AtmnoclptaggingnoscrIdentity(ObjectIdentityIdentity):
"""
This traffic descriptor type is for no CLP
with tagging and no Sustained Cell Rate. The
use of the parameter vector for this type\:
Parameter 1\: peak cell rate in cells/second
for CLP=0+1 traffic
Parameter 2\: CDVT in tenths of microseconds
Parameter 3\: not used
Parameter 4\: not used
Parameter 5\: not used.
This traffic descriptor type is applicable to
connections following the UBR.2 conformance
definition.
"""
_prefix = 'ATM-TC-MIB'
_revision = '1998-10-19'
def __init__(self):
ObjectIdentityIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
return meta._meta_table['AtmnoclptaggingnoscrIdentity']['meta_info']
|
sinkuri256/python-for-android
|
refs/heads/master
|
python-modules/twisted/twisted/pair/ip.py
|
55
|
# -*- test-case-name: twisted.pair.test.test_ip -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""Support for working directly with IP packets"""
import struct
import socket
from twisted.internet import protocol
from twisted.pair import raw
from zope.interface import implements
class IPHeader:
def __init__(self, data):
(ihlversion, self.tos, self.tot_len, self.fragment_id, frag_off,
self.ttl, self.protocol, self.check, saddr, daddr) \
= struct.unpack("!BBHHHBBH4s4s", data[:20])
self.saddr = socket.inet_ntoa(saddr)
self.daddr = socket.inet_ntoa(daddr)
self.version = ihlversion & 0x0F
self.ihl = ((ihlversion & 0xF0) >> 4) << 2
self.fragment_offset = frag_off & 0x1FFF
self.dont_fragment = (frag_off & 0x4000 != 0)
self.more_fragments = (frag_off & 0x2000 != 0)
MAX_SIZE = 2L**32
class IPProtocol(protocol.AbstractDatagramProtocol):
implements(raw.IRawPacketProtocol)
def __init__(self):
self.ipProtos = {}
def addProto(self, num, proto):
proto = raw.IRawDatagramProtocol(proto)
if num < 0:
raise TypeError, 'Added protocol must be positive or zero'
if num >= MAX_SIZE:
raise TypeError, 'Added protocol must fit in 32 bits'
if num not in self.ipProtos:
self.ipProtos[num] = []
self.ipProtos[num].append(proto)
def datagramReceived(self,
data,
partial,
dest,
source,
protocol):
header = IPHeader(data)
for proto in self.ipProtos.get(header.protocol, ()):
proto.datagramReceived(data=data[20:],
partial=partial,
source=header.saddr,
dest=header.daddr,
protocol=header.protocol,
version=header.version,
ihl=header.ihl,
tos=header.tos,
tot_len=header.tot_len,
fragment_id=header.fragment_id,
fragment_offset=header.fragment_offset,
dont_fragment=header.dont_fragment,
more_fragments=header.more_fragments,
ttl=header.ttl,
)
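# Illustrative sketch (not part of the original module): registering a handler
# for IP protocol number 17 (UDP). "UDPDumper" is a hypothetical class that
# declares it provides raw.IRawDatagramProtocol.
#
# class UDPDumper:
#     implements(raw.IRawDatagramProtocol)
#     def datagramReceived(self, data, **kwargs):
#         print 'UDP payload: %d bytes from %s' % (len(data), kwargs['source'])
#
# ip = IPProtocol()
# ip.addProto(17, UDPDumper())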
|
MobinRanjbar/hue
|
refs/heads/master
|
desktop/core/ext-py/python-ldap-2.3.13/Lib/ldap/__init__.py
|
44
|
"""
ldap - base module
See http://www.python-ldap.org/ for details.
$Id: __init__.py,v 1.70 2011/02/19 14:36:53 stroeder Exp $
"""
# This is also the overall release version number
__version__ = '2.3.13'
import sys
if __debug__:
# Tracing is only supported in debugging mode
import traceback
_trace_level = 0
_trace_file = sys.stderr
_trace_stack_limit = None
from _ldap import *
class DummyLock:
"""Define dummy class with methods compatible to threading.Lock"""
def __init__(self):
pass
def acquire(self):
pass
def release(self):
pass
try:
# Check if Python installation was built with thread support
import thread
except ImportError:
LDAPLockBaseClass = DummyLock
else:
import threading
LDAPLockBaseClass = threading.Lock
class LDAPLock:
"""
Mainly a wrapper class to log all locking events.
Note that this cumbersome approach with _lock attribute was taken
since threading.Lock is not suitable for sub-classing.
"""
_min_trace_level = 2
def __init__(self,lock_class=None,desc=''):
"""
lock_class
Class compatible to threading.Lock
desc
Description shown in debug log messages
"""
self._desc = desc
self._lock = (lock_class or LDAPLockBaseClass)()
def acquire(self):
if __debug__:
if _trace_level>=self._min_trace_level:
_trace_file.write('***%s %s.acquire()\n' % (self._desc,self.__class__.__name__))
return self._lock.acquire()
def release(self):
if __debug__:
if _trace_level>=self._min_trace_level:
_trace_file.write('***%s %s.release()\n' % (self._desc,self.__class__.__name__))
return self._lock.release()
# Create module-wide lock for serializing all calls into underlying LDAP lib
_ldap_module_lock = LDAPLock(desc='Module wide')
from functions import open,initialize,init,get_option,set_option
from ldap.dn import explode_dn,explode_rdn,str2dn,dn2str
del str2dn
del dn2str
# More constants
# For compatibility of 2.3 and 2.4 OpenLDAP API
OPT_DIAGNOSTIC_MESSAGE = OPT_ERROR_STRING
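# Illustrative usage sketch (not part of the original module): a minimal
# connect-and-bind sequence using the functions re-exported above. The URI,
# bind DN and password are hypothetical placeholders.
#
# conn = initialize('ldap://ldap.example.com')
# conn.set_option(OPT_PROTOCOL_VERSION, VERSION3)
# conn.simple_bind_s('cn=admin,dc=example,dc=com', 'secret')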
|
benjaminpope/whisky
|
refs/heads/master
|
Seeing/phasescreen.py
|
2
|
# ------------------------------------------------------------ phasescreen.py ----------------------------------------
# Author: Alexey Latyshev --------------------------------------------------------------------------------------------
# ------------------- This file contains functions for generating atmosphere phasescreen and getting its evolution----
# ------------------- above a pupil---------------------------------------------- ------------------------------------
# ====================================================================================================================
import numpy as np
import sys
# --------------------------------------------------------------------------------------------------------------------
# --------------------------------------------phaseScreen-------------------------------------------------------------
# generating phasescreen using Kolmogorov/von Kármán model
# --------------------------------------------------------------------------------------------------------------------
# for reference see
# Welsh, Byron M.
# "Fourier-series-based atmospheric phase screen generator for simulating anisoplanatic geometries and temporal evolution."
# Optical Science, Engineering and Instrumentation'97. International Society for Optics and Photonics, 1997.
### input parameters
# size=1.0 # size of the screen (m)
# scale=1024.0 # scale factor (pixels/m)
# r0=0.3 # Fried parameter (m)
# seed = 0 # seed for random (-1 = no seed)
# L0=10.0 # outer scale in meters (-1 = infinity)
# ao = 10.0 # actuators density (number of actuators per metre) for AO correction. 0 if no correction
#strehl # desired strehl ratio (inflate the result atmosphere)
'''
# The following parameters are not used anymore
# aopower=1.0 # power of ao amplitutes correction
#fc=10. # cutoff frequency (in lambda/D)
#lambdaD #lambda/D value in pixels (=int(rad2mas(lambda/D)*pscale)+1)
#telSize= telescope diameter in meters to avoid tilt
#NB: if any of aopower, fc, lambdaD parameters = 0 then no AO applied
'''
### output parameter
# phases - array with phase shifts
#def phaseScreen(size,scale,r0=0.2,ao=0.0,L0=-1.0,seed=-1,aopower=50.0,fc=10.,lambdaD=0.,strehl=0.0) :
def phaseScreen(size,scale,r0=0.2,ao=0.0,L0=-1.0,seed=-1,strehl=0.0,showStrehl=True,telSize=0.) :
''' generating phasescreen using Kolmogorov/von Kármán model '''
# size of screen in pixels
Nxy=int(np.round(size*scale))
#
# Calculating center of the image
Nc = Nxy //2
#
# initializing random generator
if seed != -1 :
np.random.seed(seed)
#
# generating random complex array size x size with normal distribution
gaussc=np.random.randn(Nxy,Nxy)+1j*np.random.randn(Nxy,Nxy)
#
#generating spatial freqs (normalized)
#freqs=(np.arange(Nxy,dtype=float)-Nc)/(2*size)
#generating spatial freqs (normalized)
freqs=(np.arange(Nxy,dtype=float)-Nc)/(2*scale*size)
#
#creating array (rho) with distances (spatial freqs)
xx,yy = np.meshgrid(freqs, freqs)
rho = np.hypot(yy,xx)
# avoiding division by zero
if rho.min()==0:
rho[rho.argmin() // len(rho), rho.argmin() % len(rho)] = 1e-9
# Obtaining amplitutes from power spectrum density
# correcting r0 to match the units
r0_fixed=r0/float(size)
if L0==-1 :
asd=np.sqrt(0.023)*(np.power(r0_fixed*rho,-5.0/6)/rho)
else : asd=np.sqrt(0.023)*(np.power(r0_fixed,-5.0/6))*pow((np.power(rho,2)+pow(L0,2)),-11.0/12)
# Removing infinity from psd
asd[Nc,Nc]=0
#
# applying AO (supergaussian filter) if required
# Section is replaced by Frantz's code
'''
# Alexey's version
if ao != 0 :
filter = 1 - np.exp(-1.0*pow((rho*2*scale/ao),10.0))
aopow=aopower
else :
filter = np.ones(np.shape(asd))
aopow=1.0
#
#generating phasescreen
phases=np.fft.ifft2(np.fft.fftshift(asd*(filter*(aopow-1)/aopow+aopow)*gaussc)).real
# end
'''
'''
# Frantz's version
if (np.abs(aopower)+np.abs(lambdaD)+np.abs(fc)) >0 :
in_fc = (rho <= (fc*lambdaD/(2*scale*size)))
asd[in_fc]/=aopower #
phases=np.fft.ifft2(np.fft.fftshift(asd*gaussc)).real
# end
'''
# Peter's version
if ao>0 :
in_fc= (rho<= (ao/(2*scale)))
#flat spectrum inside
asd[in_fc]=asd[~in_fc].max()
if telSize>0 :
sz=int(np.ceil(size/telSize*2))
asd[Nc-sz:Nc+sz+1,Nc-sz:Nc+sz+1]=0
phases=np.fft.ifft2(np.fft.fftshift(asd*gaussc)).real
# end
s = np.exp(-(phases-phases.mean()).std()**2)# current strehl
if showStrehl : print("Current strehl: %f" % (s))
if strehl > 0 :
phases *= np.sqrt(np.log(strehl)/np.log(s))
return phases
# --!EOFunc-----------------------------------------------------------------------------------------------------------
# Code for test and draw
# phases=phaseScreen(1,2048,r0=0.3,seed=0,ao=0)
#result=plt.matshow(phases)
#plt.show(result)
# checks
#print ("Max phase delay = %f nm" % (np.max(phases)-np.min(phases)/1e-9))
#print ("Max phase difference at 1000nm (rad) = %f" % (np.max(phases)-np.min(phases)*2*3.14/1e-6))
#print ("Test rms = %f" % phases.std())
# --------------------------------------------------------------------------------------------------------------------
# --------------------------------------------delayToPhase------------------------------------------------------------
# changing wave delay to phase delay for a particular wavelength
# --------------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------------
# phasescreen - input phasescreen (for original or close wavelength) with delay in meters
# wl - wavelength
# norm - normalize to [0:2*pi)
# output:
# - phases - delay in radians
def delayToPhase(phasescreen,wl=1e-6, norm=False) :
''' changing wave delay to phase delay for a particular wavelength '''
if norm==True :
phases=((phasescreen-phasescreen.min()) % wl)/wl*2*np.pi
else :
phases=(phasescreen-phasescreen.min())/wl*2*np.pi
return phases
# --!EOFunc-----------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------------
# --------------------------------------------phaseeToDelay------------------------------------------------------------
# changing phase delay to wave delay for a particular wavelength
# --------------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------------
# phasescreen - input phasescreen (for original or close wavelength) with delay in radians
# wl - wavelength
# output:
# - phases - delay in meters
def phaseToDelay(phasescreen,wl=1e-6) :
''' changing phase delay to wave delay for a particular wavelength '''
wl_delay = phasescreen*(wl/(2*np.pi))
return wl_delay
# --!EOFunc-----------------------------------------------------------------------------------------------------------
# ----------------------------------------extractRegion---------------------------------------------------------------
# the function takes matrix data as a periodic one and extracts data of sizex x sizey beginning at the (x,y) coordinate
# NB: Please keep in mind that 1st index of a matrix is related to Y axis and 2nd index is to X axis
# --------------------------------------------------------------------------------------------------------------------
def extractRegion(data,x,y,sizex,sizey) :
''' the function takes matrix data as a periodic
one and extracts data of sizex x sizey beginning at the (x,y) coordinate '''
region=np.empty((sizey,sizex),dtype=data.dtype)
# placing the starting point inside the matrix
new_y=int(y-np.floor(y/data.shape[0])*data.shape[0])
new_x=int(x-np.floor(x/data.shape[1])*data.shape[1])
# calculating the point inside data matrix where region ends
x_end = new_x+sizex-int(np.floor((new_x+sizex-1)/data.shape[1])*data.shape[1])
y_end = new_y+sizey-int(np.floor((new_y+sizey-1)/data.shape[0])*data.shape[0])
#extract region
# data border in general may split the region into following rectangles
# 1 2
# 3 4
if x_end>new_x and y_end>new_y : # case 1: all the rectangles are within the data border
region[0:sizey,0:sizex]=data[new_y:y_end,new_x:x_end]
elif x_end>new_x and y_end<=new_y : # case 2: rectangles 3 and 4 are out of the data border
region[0:(data.shape[1]-new_y),0:sizex]=data[new_y:data.shape[1],new_x:x_end]
region[(data.shape[1]-new_y):sizey,0:sizex]=data[0:y_end,new_x:x_end]
elif x_end<=new_x and y_end>new_y : # case 3: rectangles 2 and 4 are out of the data border
region[0:sizey,0:data.shape[0]-new_x]=data[new_y:y_end,new_x:data.shape[0]]
region[0:sizey,(data.shape[0]-new_x):sizex]=data[new_y:y_end,0:x_end]
elif x_end<=new_x and y_end<=new_y : # case 4: rectangles 2, 3 and 4 are out of the data border
# rect 1
region[0:(data.shape[1]-new_y),0:(data.shape[0]-new_x)]=data[new_y:(data.shape[1]),new_x:(data.shape[0])]
# rect 4
region[(data.shape[1]-new_y):sizey,(data.shape[0]-new_x):sizex]=data[0:y_end,0:x_end]
# rect 2
region[0:(data.shape[1]-new_y),(data.shape[0]-new_x):sizex]=data[new_y:data.shape[1],0:x_end]
# rect 3
region[(data.shape[1]-new_y):sizey,0:(data.shape[0]-new_x)]=data[0:y_end,new_x:data.shape[0]]
return region
# --!EOFunc-----------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------------
# --------------------------------------------------getPupilScreen----------------------------------------------------
# A function for calculating phasescreen over the pupil at an arbitrary moment of time t
# --------------------------------------------------------------------------------------------------------------------
# Here we assume that wind is blowing with constant speed v and (0,0) coordinate of pupil is located
# at (x0,y0) point of phasescreen. The direction of wind is detected automatically to get the maximum coverage of phasescreen
# phases # pre-generated phasescreen
# pupilSize=0.5 # pupil diameter in meters (NB: MUST be smaller than phasescreen)
# scale=2048.0 # scale factor (pixels/m)
# v=1.0 # wind velocity (m/s)
# t # point of time to get phasescreen from
# x0=0, y0=0 # initial coordinates (assuming the screen is periodic)
def getPupilScreen(phases,pupilSize,scale,v,t,x0=0,y0=0) :
''' A function for calculating phasescreen over the pupil at an arbitrary moment of time t '''
#
# calculating pupil diameter in pixels
pupilpix=int(np.floor(scale*pupilSize))
#
# getting screen size in pixels
screenpix=len(phases)
#
# movement path angle (tg)
#mvtangle = 1.0*pupilpix/screenpix
mvlength=np.hypot(screenpix,pupilpix)
#
# converting velocity and velocity projections to pixels/s
vpix=np.floor(scale*v)
vpix_x=int(np.round(vpix*screenpix/mvlength))
vpix_y=int(np.round(vpix*pupilpix/mvlength))
#
# a number of checks
if screenpix< pupilpix :
print("Phase screen is too small. The minimum size is %f m" % (screenpix/scale))
return
if t*vpix_x+x0 > np.floor(screenpix/pupilpix)*screenpix :
print("Time value is too big. The maximum time is %f s" % (np.floor(screenpix/pupilpix)*screenpix/vpix_x))
return
# current shift of a screen
mv_x=int(vpix_x*t)
mv_y=int(vpix_y*t)
#
return extractRegion(phases,x0+mv_x,y0+mv_y,pupilpix,pupilpix)
# --!EOFunc-----------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------getPhasesEvolution------------------------------------------
# show phasescreen movement above the pupil
# --------------------------------------------------------------------------------------------------------------------
# phases - pre-generated phasescreen/visibility screens
# pupilSize=0.5 # pupil diameter in meters (NB: MUST be smaller than phasescreen)
# scale=2048.0 # scale factor (pixels/m)
# v=1.0 # wind velocity (m/s)
# sfrq=10 # number of samples per second
# stime=2.0 # desired sampling time (0 = maximum)
# expTime=0.0 # exposure time for each shot
# expNum=5 # number of frames taken for averaging
# showProgress=True # show processing progress
# returns
# - evolving phasescreens array (pupilScreens).
# If exposure time > 0 then each element of pupilScreens arraycontains
# additional array of arrays with phasescreens over exposure time
# check len(shape(pupilScreens)) before using the output
def getPhasesEvolution(phases,pupilSize,scale,v,sfrq,stime=0.0,expTime=0.0,expNum=5, showProgress=True) :
''' show phasescreen movement above the pupil '''
# duration of sample
sdur=1.0/sfrq
# getting screen size by X axis in pixels
screenpix = phases.shape[1]
# calculating pupil diameter in pixels
pupilpix=int(np.floor(scale*pupilSize))
# calculating Vpix_x - projected speed of wind in pixels/s
vpix_x=int(np.round(np.floor(scale*v)*screenpix/np.hypot(screenpix,pupilpix)))
if stime==0 : # calculating maximum sampling time
stime = (np.floor(screenpix/pupilpix)*screenpix/vpix_x)-expTime
elif (stime+expTime)*vpix_x > np.floor(screenpix/pupilpix)*screenpix :
print("Sampling or exposure time is too long. The maximum time period is %f s" % (1.0*np.floor(screenpix/pupilpix)*screenpix/vpix_x))
return
#
# initializing time counter
time = 0.0
#pupilScreens=[]
if expTime>0.0 and expNum>1 :
pupilScreens=np.zeros((int(stime/sdur)+1,expNum,int(pupilSize*scale),int(pupilSize*scale)),dtype='float')
else :
pupilScreens=np.zeros((int(stime/sdur)+1,int(pupilSize*scale),int(pupilSize*scale)),dtype='float')
# Sampling
time_n=0
while (time<stime) :
if showProgress : print("\nTime=%fs. End time: %fs" % (time,stime))
# exposure time != 0 - smoothing screen
if expTime>0.0 and expNum>1 :
#expScreen=[]
for expFrame in range(0,expNum) :
if showProgress : sys.stdout.write("\r - Exposure screen %d of %d" % (expFrame+1,expNum))
#print(" - Exposure screen %d of %d" % (expFrame+1,expNum))
#expScreen.append(getPupilScreen(phases,pupilSize,scale,v,time+expTime*expFrame/expNum))
pupilScreens[time_n][expFrame]=getPupilScreen(phases,pupilSize,scale,v,time+expTime*expFrame/expNum)
#pupilScreens.append(expScreen)
else :
#pupilScreens.append(getPupilScreen(phases,pupilSize,scale,v,time))
pupilScreens[time_n]=getPupilScreen(phases,pupilSize,scale,v,time)
time+=sdur
time_n+=1
#return np.asarray(pupilScreens)
return pupilScreens
# --!EOFunc-----------------------------------------------------------------------------------------------------------
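# Illustrative usage sketch (not part of the original module), tying the
# functions above together; all parameter values below are made-up examples.
# phases = phaseScreen(size=2.0, scale=256.0, r0=0.2, seed=0)
# screens = getPhasesEvolution(phases, pupilSize=0.5, scale=256.0, v=5.0,
#                              sfrq=10, stime=1.0, showProgress=False)
# print(screens.shape)  # -> (number_of_samples, pupil_pixels, pupil_pixels)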
|
nikhilprathapani/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/test/test_crypt.py
|
63
|
from test import support
import unittest
crypt = support.import_module('crypt')
class CryptTestCase(unittest.TestCase):
def test_crypt(self):
c = crypt.crypt('mypassword', 'ab')
if support.verbose:
print('Test encryption: ', c)
def test_main():
support.run_unittest(CryptTestCase)
if __name__ == "__main__":
test_main()
|
Woomp/fibonacci
|
refs/heads/develop
|
setup.py
|
2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-\
from setuptools import find_packages
from setuptools.command.test import test as TestCommand
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
requirements = [
# TODO: put package requirements here
]
test_requirements = ['tox']
class Tox(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, because outside the eggs aren't loaded
import tox
errcode = tox.cmdline(self.test_args)
sys.exit(errcode)
setup(
name='fibonacci',
version='0.3.0',
description="A minimal python project for integration test purposes.",
long_description=readme + '\n\n' + history,
author="Maik Figura",
author_email='maiksensi@gmail.com',
url='https://github.com/maiksensi/fibonacci',
packages=find_packages('src'),
package_dir={'': 'src'},
include_package_data=True,
install_requires=requirements,
cmdclass={'test': Tox},
license="BSD",
zip_safe=False,
keywords='fibonacci',
classifiers=[
'Development Status :: 3 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
tests_require=test_requirements
)
|
UQ-UQx/old_injestor
|
refs/heads/master
|
base/daemon.py
|
1
|
#!/usr/bin/env python
import sys, os, time, atexit
from signal import SIGTERM
class Daemon:
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
def daemonize(self):
"""
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file(self.stdin, 'r')
so = file(self.stdout, 'a+')
se = file(self.stderr, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.delpid)
pid = str(os.getpid())
file(self.pidfile, 'w+').write("%s\n" % pid)
def delpid(self):
os.remove(self.pidfile)
def start(self):
"""
Start the daemon
"""
# Check for a pidfile to see if the daemon already runs
try:
pf = file(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if pid:
message = "pidfile %s already exist. Daemon already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run()
def status(self):
running = True
# Get the pid from the pidfile
try:
pf = file(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
running = False
return running
def stop(self):
"""
Stop the daemon
"""
# Get the pid from the pidfile
try:
pf = file(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print str(err)
sys.exit(1)
def restart(self):
"""
Restart the daemon
"""
self.stop()
self.start()
def run(self):
"""
You should override this method when you subclass Daemon. It will be called after the process has been
daemonized by start() or restart().
"""
|
PepperPD/edx-pepper-platform
|
refs/heads/master
|
common/lib/html_to_text.py
|
244
|
"""Provides a function to convert html to plaintext."""
import logging
from subprocess import Popen, PIPE
log = logging.getLogger(__name__)
def html_to_text(html_message):
"""
Converts an html message to plaintext.
Currently uses lynx in a subprocess; should be refactored to
use something more pythonic.
"""
process = Popen(
['lynx', '-stdin', '-display_charset=UTF-8', '-assume_charset=UTF-8', '-dump'],
stdin=PIPE,
stdout=PIPE
)
# use lynx to get plaintext
(plaintext, err_from_stderr) = process.communicate(
input=html_message.encode('utf-8')
)
if err_from_stderr:
log.info(err_from_stderr)
return plaintext
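# Illustrative usage sketch (not part of the original module); requires the
# lynx binary to be installed on the host.
#
# html_to_text(u'<p>Hello <b>world</b></p>')
# -> a plaintext rendering such as 'Hello world\n'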
|
orgito/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/azure/azure_rm_resource_facts.py
|
1
|
#!/usr/bin/python
#
# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_resource_facts
version_added: "2.6"
short_description: Generic facts of Azure resources.
description:
- Obtain facts of any resource using Azure REST API.
- This module gives access to resources that are not supported via Ansible modules.
    - Refer to https://docs.microsoft.com/en-us/rest/api/ for details of the REST API for a specific resource.
options:
url:
description:
- Azure RM Resource URL.
api_version:
description:
- Specific API version to be used.
required: yes
provider:
description:
            - Provider type, should be specified if no URL is given.
resource_group:
description:
- Resource group to be used.
- Required if URL is not specified.
resource_type:
description:
- Resource type.
resource_name:
description:
- Resource name.
subresource:
description:
- List of subresources
suboptions:
namespace:
description:
- Subresource namespace
type:
description:
- Subresource type
name:
description:
- Subresource name
extends_documentation_fragment:
- azure
author:
- "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Get scaleset info
azure_rm_resource_facts:
resource_group: "{{ resource_group }}"
provider: compute
resource_type: virtualmachinescalesets
resource_name: "{{ scaleset_name }}"
api_version: "2017-12-01"
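  # Hypothetical second example (editor's addition): querying by the full resource
  # URL instead of resource_group/provider/resource_type; the subscription id and
  # resource names below are placeholders only.
  - name: Get scaleset info by URL
    azure_rm_resource_facts:
      url: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachineScaleSets/myVmss
      api_version: "2017-12-01"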
'''
RETURN = '''
response:
description: Response specific to resource type.
returned: always
type: dict
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
try:
from msrestazure.azure_exceptions import CloudError
from msrest.service_client import ServiceClient
from msrestazure.tools import resource_id, is_valid_resource_id
import json
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMResourceFacts(AzureRMModuleBase):
def __init__(self):
# define user inputs into argument
self.module_arg_spec = dict(
url=dict(
type='str',
required=False
),
provider=dict(
type='str',
required=False
),
resource_group=dict(
type='str',
required=False
),
resource_type=dict(
type='str',
required=False
),
resource_name=dict(
type='str',
required=False
),
subresource=dict(
type='list',
required=False,
default=[]
),
api_version=dict(
type='str',
required=True
)
)
# store the results of the module operation
self.results = dict(
response=None
)
self.mgmt_client = None
self.url = None
self.api_version = None
self.provider = None
self.resource_group = None
self.resource_type = None
self.resource_name = None
self.subresource = []
super(AzureRMResourceFacts, self).__init__(self.module_arg_spec, supports_tags=False)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
base_url=self._cloud_environment.endpoints.resource_manager)
if self.url is None:
orphan = None
rargs = dict()
rargs['subscription'] = self.subscription_id
rargs['resource_group'] = self.resource_group
            if not (self.provider is None or self.provider.lower().startswith('microsoft.')):
rargs['namespace'] = "Microsoft." + self.provider
else:
rargs['namespace'] = self.provider
if self.resource_type is not None and self.resource_name is not None:
rargs['type'] = self.resource_type
rargs['name'] = self.resource_name
for i in range(len(self.subresource)):
resource_ns = self.subresource[i].get('namespace', None)
resource_type = self.subresource[i].get('type', None)
resource_name = self.subresource[i].get('name', None)
if resource_type is not None and resource_name is not None:
rargs['child_namespace_' + str(i + 1)] = resource_ns
rargs['child_type_' + str(i + 1)] = resource_type
rargs['child_name_' + str(i + 1)] = resource_name
else:
orphan = resource_type
else:
orphan = self.resource_type
self.url = resource_id(**rargs)
if orphan is not None:
self.url += '/' + orphan
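        # Sketch of the constructed URL (editor's note): with the values from the
        # EXAMPLES block above, resource_id() yields something of the form
        # /subscriptions/<id>/resourceGroups/<rg>/providers/Microsoft.Compute/virtualmachinescalesets/<name>.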
self.results['url'] = self.url
query_parameters = {}
query_parameters['api-version'] = self.api_version
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
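        # Editor's note: 200 and 404 are both treated as expected status codes by
        # the query below, so a missing resource does not raise an exception here.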
response = self.mgmt_client.query(self.url, "GET", query_parameters, header_parameters, None, [200, 404], 0, 0)
try:
response = json.loads(response.text)
            if isinstance(response, list):
self.results['response'] = response
else:
self.results['response'] = [response]
except Exception:
self.results['response'] = []
return self.results
def main():
AzureRMResourceFacts()
if __name__ == '__main__':
main()
|
aabbox/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/plat-sunos5/IN.py
|
108
|
# Generated by h2py from /usr/include/netinet/in.h
# Included from sys/feature_tests.h
# Included from sys/isa_defs.h
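# NOTE (editor's addition): the repeated assignments below come from the
# alternative preprocessor branches of the original C headers (h2py emits each
# branch); at import time the last assignment for a given name wins.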
_CHAR_ALIGNMENT = 1
_SHORT_ALIGNMENT = 2
_INT_ALIGNMENT = 4
_LONG_ALIGNMENT = 8
_LONG_LONG_ALIGNMENT = 8
_DOUBLE_ALIGNMENT = 8
_LONG_DOUBLE_ALIGNMENT = 16
_POINTER_ALIGNMENT = 8
_MAX_ALIGNMENT = 16
_ALIGNMENT_REQUIRED = 1
_CHAR_ALIGNMENT = 1
_SHORT_ALIGNMENT = 2
_INT_ALIGNMENT = 4
_LONG_ALIGNMENT = 4
_LONG_LONG_ALIGNMENT = 4
_DOUBLE_ALIGNMENT = 4
_LONG_DOUBLE_ALIGNMENT = 4
_POINTER_ALIGNMENT = 4
_MAX_ALIGNMENT = 4
_ALIGNMENT_REQUIRED = 0
_CHAR_ALIGNMENT = 1
_SHORT_ALIGNMENT = 2
_INT_ALIGNMENT = 4
_LONG_LONG_ALIGNMENT = 8
_DOUBLE_ALIGNMENT = 8
_ALIGNMENT_REQUIRED = 1
_LONG_ALIGNMENT = 4
_LONG_DOUBLE_ALIGNMENT = 8
_POINTER_ALIGNMENT = 4
_MAX_ALIGNMENT = 8
_LONG_ALIGNMENT = 8
_LONG_DOUBLE_ALIGNMENT = 16
_POINTER_ALIGNMENT = 8
_MAX_ALIGNMENT = 16
_POSIX_C_SOURCE = 1
_LARGEFILE64_SOURCE = 1
_LARGEFILE_SOURCE = 1
_FILE_OFFSET_BITS = 64
_FILE_OFFSET_BITS = 32
_POSIX_C_SOURCE = 199506
_POSIX_PTHREAD_SEMANTICS = 1
_XOPEN_VERSION = 500
_XOPEN_VERSION = 4
_XOPEN_VERSION = 3
from TYPES import *
# Included from sys/stream.h
# Included from sys/vnode.h
from TYPES import *
# Included from sys/t_lock.h
# Included from sys/machlock.h
from TYPES import *
LOCK_HELD_VALUE = 0xff
def SPIN_LOCK(pl): return ((pl) > ipltospl(LOCK_LEVEL))
def LOCK_SAMPLE_INTERVAL(i): return (((i) & 0xff) == 0)
CLOCK_LEVEL = 10
LOCK_LEVEL = 10
DISP_LEVEL = (LOCK_LEVEL + 1)
PTR24_LSB = 5
PTR24_MSB = (PTR24_LSB + 24)
PTR24_ALIGN = 32
PTR24_BASE = 0xe0000000
# Included from sys/param.h
from TYPES import *
_POSIX_VDISABLE = 0
MAX_INPUT = 512
MAX_CANON = 256
UID_NOBODY = 60001
GID_NOBODY = UID_NOBODY
UID_NOACCESS = 60002
MAX_TASKID = 999999
MAX_MAXPID = 999999
DEFAULT_MAXPID = 999999
DEFAULT_JUMPPID = 100000
DEFAULT_MAXPID = 30000
DEFAULT_JUMPPID = 0
MAXUID = 2147483647
MAXPROJID = MAXUID
MAXLINK = 32767
NMOUNT = 40
CANBSIZ = 256
NOFILE = 20
NGROUPS_UMIN = 0
NGROUPS_UMAX = 32
NGROUPS_MAX_DEFAULT = 16
NZERO = 20
NULL = 0
NULL = 0
CMASK = 0o22
CDLIMIT = (1<<11)
NBPS = 0x20000
NBPSCTR = 512
UBSIZE = 512
SCTRSHFT = 9
SYSNAME = 9
PREMOTE = 39
MAXPATHLEN = 1024
MAXSYMLINKS = 20
MAXNAMELEN = 256
NADDR = 13
PIPE_BUF = 5120
PIPE_MAX = 5120
NBBY = 8
MAXBSIZE = 8192
DEV_BSIZE = 512
DEV_BSHIFT = 9
MAXFRAG = 8
MAXOFF32_T = 0x7fffffff
MAXOFF_T = 0x7fffffffffffffff
MAXOFFSET_T = 0x7fffffffffffffff
MAXOFF_T = 0x7fffffff
MAXOFFSET_T = 0x7fffffff
def btodb(bytes): return \
def dbtob(db): return \
def lbtodb(bytes): return \
def ldbtob(db): return \
NCARGS32 = 0x100000
NCARGS64 = 0x200000
NCARGS = NCARGS64
NCARGS = NCARGS32
FSHIFT = 8
FSCALE = (1<<FSHIFT)
def DELAY(n): return drv_usecwait(n)
def mmu_ptob(x): return ((x) << MMU_PAGESHIFT)
def mmu_btop(x): return (((x)) >> MMU_PAGESHIFT)
def mmu_btopr(x): return ((((x) + MMU_PAGEOFFSET) >> MMU_PAGESHIFT))
def mmu_ptod(x): return ((x) << (MMU_PAGESHIFT - DEV_BSHIFT))
def ptod(x): return ((x) << (PAGESHIFT - DEV_BSHIFT))
def ptob(x): return ((x) << PAGESHIFT)
def btop(x): return (((x) >> PAGESHIFT))
def btopr(x): return ((((x) + PAGEOFFSET) >> PAGESHIFT))
def dtop(DD): return (((DD) + NDPP - 1) >> (PAGESHIFT - DEV_BSHIFT))
def dtopt(DD): return ((DD) >> (PAGESHIFT - DEV_BSHIFT))
_AIO_LISTIO_MAX = (4096)
_AIO_MAX = (-1)
_MQ_OPEN_MAX = (32)
_MQ_PRIO_MAX = (32)
_SEM_NSEMS_MAX = INT_MAX
_SEM_VALUE_MAX = INT_MAX
# Included from sys/unistd.h
_CS_PATH = 65
_CS_LFS_CFLAGS = 68
_CS_LFS_LDFLAGS = 69
_CS_LFS_LIBS = 70
_CS_LFS_LINTFLAGS = 71
_CS_LFS64_CFLAGS = 72
_CS_LFS64_LDFLAGS = 73
_CS_LFS64_LIBS = 74
_CS_LFS64_LINTFLAGS = 75
_CS_XBS5_ILP32_OFF32_CFLAGS = 700
_CS_XBS5_ILP32_OFF32_LDFLAGS = 701
_CS_XBS5_ILP32_OFF32_LIBS = 702
_CS_XBS5_ILP32_OFF32_LINTFLAGS = 703
_CS_XBS5_ILP32_OFFBIG_CFLAGS = 705
_CS_XBS5_ILP32_OFFBIG_LDFLAGS = 706
_CS_XBS5_ILP32_OFFBIG_LIBS = 707
_CS_XBS5_ILP32_OFFBIG_LINTFLAGS = 708
_CS_XBS5_LP64_OFF64_CFLAGS = 709
_CS_XBS5_LP64_OFF64_LDFLAGS = 710
_CS_XBS5_LP64_OFF64_LIBS = 711
_CS_XBS5_LP64_OFF64_LINTFLAGS = 712
_CS_XBS5_LPBIG_OFFBIG_CFLAGS = 713
_CS_XBS5_LPBIG_OFFBIG_LDFLAGS = 714
_CS_XBS5_LPBIG_OFFBIG_LIBS = 715
_CS_XBS5_LPBIG_OFFBIG_LINTFLAGS = 716
_SC_ARG_MAX = 1
_SC_CHILD_MAX = 2
_SC_CLK_TCK = 3
_SC_NGROUPS_MAX = 4
_SC_OPEN_MAX = 5
_SC_JOB_CONTROL = 6
_SC_SAVED_IDS = 7
_SC_VERSION = 8
_SC_PASS_MAX = 9
_SC_LOGNAME_MAX = 10
_SC_PAGESIZE = 11
_SC_XOPEN_VERSION = 12
_SC_NPROCESSORS_CONF = 14
_SC_NPROCESSORS_ONLN = 15
_SC_STREAM_MAX = 16
_SC_TZNAME_MAX = 17
_SC_AIO_LISTIO_MAX = 18
_SC_AIO_MAX = 19
_SC_AIO_PRIO_DELTA_MAX = 20
_SC_ASYNCHRONOUS_IO = 21
_SC_DELAYTIMER_MAX = 22
_SC_FSYNC = 23
_SC_MAPPED_FILES = 24
_SC_MEMLOCK = 25
_SC_MEMLOCK_RANGE = 26
_SC_MEMORY_PROTECTION = 27
_SC_MESSAGE_PASSING = 28
_SC_MQ_OPEN_MAX = 29
_SC_MQ_PRIO_MAX = 30
_SC_PRIORITIZED_IO = 31
_SC_PRIORITY_SCHEDULING = 32
_SC_REALTIME_SIGNALS = 33
_SC_RTSIG_MAX = 34
_SC_SEMAPHORES = 35
_SC_SEM_NSEMS_MAX = 36
_SC_SEM_VALUE_MAX = 37
_SC_SHARED_MEMORY_OBJECTS = 38
_SC_SIGQUEUE_MAX = 39
_SC_SIGRT_MIN = 40
_SC_SIGRT_MAX = 41
_SC_SYNCHRONIZED_IO = 42
_SC_TIMERS = 43
_SC_TIMER_MAX = 44
_SC_2_C_BIND = 45
_SC_2_C_DEV = 46
_SC_2_C_VERSION = 47
_SC_2_FORT_DEV = 48
_SC_2_FORT_RUN = 49
_SC_2_LOCALEDEF = 50
_SC_2_SW_DEV = 51
_SC_2_UPE = 52
_SC_2_VERSION = 53
_SC_BC_BASE_MAX = 54
_SC_BC_DIM_MAX = 55
_SC_BC_SCALE_MAX = 56
_SC_BC_STRING_MAX = 57
_SC_COLL_WEIGHTS_MAX = 58
_SC_EXPR_NEST_MAX = 59
_SC_LINE_MAX = 60
_SC_RE_DUP_MAX = 61
_SC_XOPEN_CRYPT = 62
_SC_XOPEN_ENH_I18N = 63
_SC_XOPEN_SHM = 64
_SC_2_CHAR_TERM = 66
_SC_XOPEN_XCU_VERSION = 67
_SC_ATEXIT_MAX = 76
_SC_IOV_MAX = 77
_SC_XOPEN_UNIX = 78
_SC_PAGE_SIZE = _SC_PAGESIZE
_SC_T_IOV_MAX = 79
_SC_PHYS_PAGES = 500
_SC_AVPHYS_PAGES = 501
_SC_COHER_BLKSZ = 503
_SC_SPLIT_CACHE = 504
_SC_ICACHE_SZ = 505
_SC_DCACHE_SZ = 506
_SC_ICACHE_LINESZ = 507
_SC_DCACHE_LINESZ = 508
_SC_ICACHE_BLKSZ = 509
_SC_DCACHE_BLKSZ = 510
_SC_DCACHE_TBLKSZ = 511
_SC_ICACHE_ASSOC = 512
_SC_DCACHE_ASSOC = 513
_SC_MAXPID = 514
_SC_STACK_PROT = 515
_SC_THREAD_DESTRUCTOR_ITERATIONS = 568
_SC_GETGR_R_SIZE_MAX = 569
_SC_GETPW_R_SIZE_MAX = 570
_SC_LOGIN_NAME_MAX = 571
_SC_THREAD_KEYS_MAX = 572
_SC_THREAD_STACK_MIN = 573
_SC_THREAD_THREADS_MAX = 574
_SC_TTY_NAME_MAX = 575
_SC_THREADS = 576
_SC_THREAD_ATTR_STACKADDR = 577
_SC_THREAD_ATTR_STACKSIZE = 578
_SC_THREAD_PRIORITY_SCHEDULING = 579
_SC_THREAD_PRIO_INHERIT = 580
_SC_THREAD_PRIO_PROTECT = 581
_SC_THREAD_PROCESS_SHARED = 582
_SC_THREAD_SAFE_FUNCTIONS = 583
_SC_XOPEN_LEGACY = 717
_SC_XOPEN_REALTIME = 718
_SC_XOPEN_REALTIME_THREADS = 719
_SC_XBS5_ILP32_OFF32 = 720
_SC_XBS5_ILP32_OFFBIG = 721
_SC_XBS5_LP64_OFF64 = 722
_SC_XBS5_LPBIG_OFFBIG = 723
_PC_LINK_MAX = 1
_PC_MAX_CANON = 2
_PC_MAX_INPUT = 3
_PC_NAME_MAX = 4
_PC_PATH_MAX = 5
_PC_PIPE_BUF = 6
_PC_NO_TRUNC = 7
_PC_VDISABLE = 8
_PC_CHOWN_RESTRICTED = 9
_PC_ASYNC_IO = 10
_PC_PRIO_IO = 11
_PC_SYNC_IO = 12
_PC_FILESIZEBITS = 67
_PC_LAST = 67
_POSIX_VERSION = 199506
_POSIX2_VERSION = 199209
_POSIX2_C_VERSION = 199209
_XOPEN_XCU_VERSION = 4
_XOPEN_REALTIME = 1
_XOPEN_ENH_I18N = 1
_XOPEN_SHM = 1
_POSIX2_C_BIND = 1
_POSIX2_CHAR_TERM = 1
_POSIX2_LOCALEDEF = 1
_POSIX2_C_DEV = 1
_POSIX2_SW_DEV = 1
_POSIX2_UPE = 1
# Included from sys/mutex.h
from TYPES import *
def MUTEX_HELD(x): return (mutex_owned(x))
# Included from sys/rwlock.h
from TYPES import *
def RW_READ_HELD(x): return (rw_read_held((x)))
def RW_WRITE_HELD(x): return (rw_write_held((x)))
def RW_LOCK_HELD(x): return (rw_lock_held((x)))
def RW_ISWRITER(x): return (rw_iswriter(x))
# Included from sys/semaphore.h
# Included from sys/thread.h
from TYPES import *
# Included from sys/klwp.h
from TYPES import *
# Included from sys/condvar.h
from TYPES import *
# Included from sys/time.h
# Included from sys/types32.h
# Included from sys/int_types.h
TIME32_MAX = INT32_MAX
TIME32_MIN = INT32_MIN
def TIMEVAL_OVERFLOW(tv): return \
from TYPES import *
DST_NONE = 0
DST_USA = 1
DST_AUST = 2
DST_WET = 3
DST_MET = 4
DST_EET = 5
DST_CAN = 6
DST_GB = 7
DST_RUM = 8
DST_TUR = 9
DST_AUSTALT = 10
ITIMER_REAL = 0
ITIMER_VIRTUAL = 1
ITIMER_PROF = 2
ITIMER_REALPROF = 3
def ITIMERVAL_OVERFLOW(itv): return \
SEC = 1
MILLISEC = 1000
MICROSEC = 1000000
NANOSEC = 1000000000
# Included from sys/time_impl.h
def TIMESPEC_OVERFLOW(ts): return \
def ITIMERSPEC_OVERFLOW(it): return \
__CLOCK_REALTIME0 = 0
CLOCK_VIRTUAL = 1
CLOCK_PROF = 2
__CLOCK_REALTIME3 = 3
CLOCK_HIGHRES = 4
CLOCK_MAX = 5
CLOCK_REALTIME = __CLOCK_REALTIME3
CLOCK_REALTIME = __CLOCK_REALTIME0
TIMER_RELTIME = 0x0
TIMER_ABSTIME = 0x1
def TICK_TO_SEC(tick): return ((tick) / hz)
def SEC_TO_TICK(sec): return ((sec) * hz)
def TICK_TO_MSEC(tick): return \
def MSEC_TO_TICK(msec): return \
def MSEC_TO_TICK_ROUNDUP(msec): return \
def TICK_TO_USEC(tick): return ((tick) * usec_per_tick)
def USEC_TO_TICK(usec): return ((usec) / usec_per_tick)
def USEC_TO_TICK_ROUNDUP(usec): return \
def TICK_TO_NSEC(tick): return ((tick) * nsec_per_tick)
def NSEC_TO_TICK(nsec): return ((nsec) / nsec_per_tick)
def NSEC_TO_TICK_ROUNDUP(nsec): return \
def TIMEVAL_TO_TICK(tvp): return \
def TIMESTRUC_TO_TICK(tsp): return \
# Included from time.h
from TYPES import *
# Included from iso/time_iso.h
NULL = 0
NULL = 0
CLOCKS_PER_SEC = 1000000
# Included from sys/select.h
FD_SETSIZE = 65536
FD_SETSIZE = 1024
_NBBY = 8
NBBY = _NBBY
def FD_ZERO(p): return bzero((p), sizeof (*(p)))
# Included from sys/signal.h
# Included from sys/iso/signal_iso.h
SIGHUP = 1
SIGINT = 2
SIGQUIT = 3
SIGILL = 4
SIGTRAP = 5
SIGIOT = 6
SIGABRT = 6
SIGEMT = 7
SIGFPE = 8
SIGKILL = 9
SIGBUS = 10
SIGSEGV = 11
SIGSYS = 12
SIGPIPE = 13
SIGALRM = 14
SIGTERM = 15
SIGUSR1 = 16
SIGUSR2 = 17
SIGCLD = 18
SIGCHLD = 18
SIGPWR = 19
SIGWINCH = 20
SIGURG = 21
SIGPOLL = 22
SIGIO = SIGPOLL
SIGSTOP = 23
SIGTSTP = 24
SIGCONT = 25
SIGTTIN = 26
SIGTTOU = 27
SIGVTALRM = 28
SIGPROF = 29
SIGXCPU = 30
SIGXFSZ = 31
SIGWAITING = 32
SIGLWP = 33
SIGFREEZE = 34
SIGTHAW = 35
SIGCANCEL = 36
SIGLOST = 37
_SIGRTMIN = 38
_SIGRTMAX = 45
SIG_BLOCK = 1
SIG_UNBLOCK = 2
SIG_SETMASK = 3
SIGNO_MASK = 0xFF
SIGDEFER = 0x100
SIGHOLD = 0x200
SIGRELSE = 0x400
SIGIGNORE = 0x800
SIGPAUSE = 0x1000
# Included from sys/siginfo.h
from TYPES import *
SIGEV_NONE = 1
SIGEV_SIGNAL = 2
SIGEV_THREAD = 3
SI_NOINFO = 32767
SI_USER = 0
SI_LWP = (-1)
SI_QUEUE = (-2)
SI_TIMER = (-3)
SI_ASYNCIO = (-4)
SI_MESGQ = (-5)
# Included from sys/machsig.h
ILL_ILLOPC = 1
ILL_ILLOPN = 2
ILL_ILLADR = 3
ILL_ILLTRP = 4
ILL_PRVOPC = 5
ILL_PRVREG = 6
ILL_COPROC = 7
ILL_BADSTK = 8
NSIGILL = 8
EMT_TAGOVF = 1
EMT_CPCOVF = 2
NSIGEMT = 2
FPE_INTDIV = 1
FPE_INTOVF = 2
FPE_FLTDIV = 3
FPE_FLTOVF = 4
FPE_FLTUND = 5
FPE_FLTRES = 6
FPE_FLTINV = 7
FPE_FLTSUB = 8
NSIGFPE = 8
SEGV_MAPERR = 1
SEGV_ACCERR = 2
NSIGSEGV = 2
BUS_ADRALN = 1
BUS_ADRERR = 2
BUS_OBJERR = 3
NSIGBUS = 3
TRAP_BRKPT = 1
TRAP_TRACE = 2
TRAP_RWATCH = 3
TRAP_WWATCH = 4
TRAP_XWATCH = 5
NSIGTRAP = 5
CLD_EXITED = 1
CLD_KILLED = 2
CLD_DUMPED = 3
CLD_TRAPPED = 4
CLD_STOPPED = 5
CLD_CONTINUED = 6
NSIGCLD = 6
POLL_IN = 1
POLL_OUT = 2
POLL_MSG = 3
POLL_ERR = 4
POLL_PRI = 5
POLL_HUP = 6
NSIGPOLL = 6
PROF_SIG = 1
NSIGPROF = 1
SI_MAXSZ = 256
SI_MAXSZ = 128
# Included from sys/time_std_impl.h
from TYPES import *
SI32_MAXSZ = 128
def SI_CANQUEUE(c): return ((c) <= SI_QUEUE)
SA_NOCLDSTOP = 0x00020000
SA_ONSTACK = 0x00000001
SA_RESETHAND = 0x00000002
SA_RESTART = 0x00000004
SA_SIGINFO = 0x00000008
SA_NODEFER = 0x00000010
SA_NOCLDWAIT = 0x00010000
SA_WAITSIG = 0x00010000
NSIG = 46
MAXSIG = 45
S_SIGNAL = 1
S_SIGSET = 2
S_SIGACTION = 3
S_NONE = 4
MINSIGSTKSZ = 2048
SIGSTKSZ = 8192
SS_ONSTACK = 0x00000001
SS_DISABLE = 0x00000002
SN_PROC = 1
SN_CANCEL = 2
SN_SEND = 3
# Included from sys/ucontext.h
from TYPES import *
# Included from sys/regset.h
REG_CCR = (0)
REG_PSR = (0)
REG_PSR = (0)
REG_PC = (1)
REG_nPC = (2)
REG_Y = (3)
REG_G1 = (4)
REG_G2 = (5)
REG_G3 = (6)
REG_G4 = (7)
REG_G5 = (8)
REG_G6 = (9)
REG_G7 = (10)
REG_O0 = (11)
REG_O1 = (12)
REG_O2 = (13)
REG_O3 = (14)
REG_O4 = (15)
REG_O5 = (16)
REG_O6 = (17)
REG_O7 = (18)
REG_ASI = (19)
REG_FPRS = (20)
REG_PS = REG_PSR
REG_SP = REG_O6
REG_R0 = REG_O0
REG_R1 = REG_O1
_NGREG = 21
_NGREG = 19
NGREG = _NGREG
_NGREG32 = 19
_NGREG64 = 21
SPARC_MAXREGWINDOW = 31
MAXFPQ = 16
XRS_ID = 0x78727300
# Included from v7/sys/privregs.h
# Included from v7/sys/psr.h
PSR_CWP = 0x0000001F
PSR_ET = 0x00000020
PSR_PS = 0x00000040
PSR_S = 0x00000080
PSR_PIL = 0x00000F00
PSR_EF = 0x00001000
PSR_EC = 0x00002000
PSR_RSV = 0x000FC000
PSR_ICC = 0x00F00000
PSR_C = 0x00100000
PSR_V = 0x00200000
PSR_Z = 0x00400000
PSR_N = 0x00800000
PSR_VER = 0x0F000000
PSR_IMPL = 0xF0000000
PSL_ALLCC = PSR_ICC
PSL_USER = (PSR_S)
PSL_USERMASK = (PSR_ICC)
PSL_UBITS = (PSR_ICC|PSR_EF)
def USERMODE(ps): return (((ps) & PSR_PS) == 0)
# Included from sys/fsr.h
FSR_CEXC = 0x0000001f
FSR_AEXC = 0x000003e0
FSR_FCC = 0x00000c00
FSR_PR = 0x00001000
FSR_QNE = 0x00002000
FSR_FTT = 0x0001c000
FSR_VER = 0x000e0000
FSR_TEM = 0x0f800000
FSR_RP = 0x30000000
FSR_RD = 0xc0000000
FSR_VER_SHIFT = 17
FSR_FCC1 = 0x00000003
FSR_FCC2 = 0x0000000C
FSR_FCC3 = 0x00000030
FSR_CEXC_NX = 0x00000001
FSR_CEXC_DZ = 0x00000002
FSR_CEXC_UF = 0x00000004
FSR_CEXC_OF = 0x00000008
FSR_CEXC_NV = 0x00000010
FSR_AEXC_NX = (0x1 << 5)
FSR_AEXC_DZ = (0x2 << 5)
FSR_AEXC_UF = (0x4 << 5)
FSR_AEXC_OF = (0x8 << 5)
FSR_AEXC_NV = (0x10 << 5)
FTT_NONE = 0
FTT_IEEE = 1
FTT_UNFIN = 2
FTT_UNIMP = 3
FTT_SEQ = 4
FTT_ALIGN = 5
FTT_DFAULT = 6
FSR_FTT_SHIFT = 14
FSR_FTT_IEEE = (FTT_IEEE << FSR_FTT_SHIFT)
FSR_FTT_UNFIN = (FTT_UNFIN << FSR_FTT_SHIFT)
FSR_FTT_UNIMP = (FTT_UNIMP << FSR_FTT_SHIFT)
FSR_FTT_SEQ = (FTT_SEQ << FSR_FTT_SHIFT)
FSR_FTT_ALIGN = (FTT_ALIGN << FSR_FTT_SHIFT)
FSR_FTT_DFAULT = (FTT_DFAULT << FSR_FTT_SHIFT)
FSR_TEM_NX = (0x1 << 23)
FSR_TEM_DZ = (0x2 << 23)
FSR_TEM_UF = (0x4 << 23)
FSR_TEM_OF = (0x8 << 23)
FSR_TEM_NV = (0x10 << 23)
RP_DBLEXT = 0
RP_SINGLE = 1
RP_DOUBLE = 2
RP_RESERVED = 3
RD_NEAR = 0
RD_ZER0 = 1
RD_POSINF = 2
RD_NEGINF = 3
FPRS_DL = 0x1
FPRS_DU = 0x2
FPRS_FEF = 0x4
PIL_MAX = 0xf
def SAVE_GLOBALS(RP): return \
def RESTORE_GLOBALS(RP): return \
def SAVE_OUTS(RP): return \
def RESTORE_OUTS(RP): return \
def SAVE_WINDOW(SBP): return \
def RESTORE_WINDOW(SBP): return \
def STORE_FPREGS(FP): return \
def LOAD_FPREGS(FP): return \
_SPARC_MAXREGWINDOW = 31
_XRS_ID = 0x78727300
GETCONTEXT = 0
SETCONTEXT = 1
UC_SIGMASK = 0o01
UC_STACK = 0o02
UC_CPU = 0o04
UC_MAU = 0o10
UC_FPU = UC_MAU
UC_INTR = 0o20
UC_ASR = 0o40
UC_MCONTEXT = (UC_CPU|UC_FPU|UC_ASR)
UC_ALL = (UC_SIGMASK|UC_STACK|UC_MCONTEXT)
_SIGQUEUE_MAX = 32
_SIGNOTIFY_MAX = 32
# Included from sys/pcb.h
INSTR_VALID = 0x02
NORMAL_STEP = 0x04
WATCH_STEP = 0x08
CPC_OVERFLOW = 0x10
ASYNC_HWERR = 0x20
STEP_NONE = 0
STEP_REQUESTED = 1
STEP_ACTIVE = 2
STEP_WASACTIVE = 3
# Included from sys/msacct.h
LMS_USER = 0
LMS_SYSTEM = 1
LMS_TRAP = 2
LMS_TFAULT = 3
LMS_DFAULT = 4
LMS_KFAULT = 5
LMS_USER_LOCK = 6
LMS_SLEEP = 7
LMS_WAIT_CPU = 8
LMS_STOPPED = 9
NMSTATES = 10
# Included from sys/lwp.h
# Included from sys/synch.h
from TYPES import *
USYNC_THREAD = 0x00
USYNC_PROCESS = 0x01
LOCK_NORMAL = 0x00
LOCK_ERRORCHECK = 0x02
LOCK_RECURSIVE = 0x04
USYNC_PROCESS_ROBUST = 0x08
LOCK_PRIO_NONE = 0x00
LOCK_PRIO_INHERIT = 0x10
LOCK_PRIO_PROTECT = 0x20
LOCK_STALL_NP = 0x00
LOCK_ROBUST_NP = 0x40
LOCK_OWNERDEAD = 0x1
LOCK_NOTRECOVERABLE = 0x2
LOCK_INITED = 0x4
LOCK_UNMAPPED = 0x8
LWP_DETACHED = 0x00000040
LWP_SUSPENDED = 0x00000080
__LWP_ASLWP = 0x00000100
MAXSYSARGS = 8
NORMALRETURN = 0
JUSTRETURN = 1
LWP_USER = 0x01
LWP_SYS = 0x02
TS_FREE = 0x00
TS_SLEEP = 0x01
TS_RUN = 0x02
TS_ONPROC = 0x04
TS_ZOMB = 0x08
TS_STOPPED = 0x10
T_INTR_THREAD = 0x0001
T_WAKEABLE = 0x0002
T_TOMASK = 0x0004
T_TALLOCSTK = 0x0008
T_WOULDBLOCK = 0x0020
T_DONTBLOCK = 0x0040
T_DONTPEND = 0x0080
T_SYS_PROF = 0x0100
T_WAITCVSEM = 0x0200
T_WATCHPT = 0x0400
T_PANIC = 0x0800
TP_HOLDLWP = 0x0002
TP_TWAIT = 0x0004
TP_LWPEXIT = 0x0008
TP_PRSTOP = 0x0010
TP_CHKPT = 0x0020
TP_EXITLWP = 0x0040
TP_PRVSTOP = 0x0080
TP_MSACCT = 0x0100
TP_STOPPING = 0x0200
TP_WATCHPT = 0x0400
TP_PAUSE = 0x0800
TP_CHANGEBIND = 0x1000
TS_LOAD = 0x0001
TS_DONT_SWAP = 0x0002
TS_SWAPENQ = 0x0004
TS_ON_SWAPQ = 0x0008
TS_CSTART = 0x0100
TS_UNPAUSE = 0x0200
TS_XSTART = 0x0400
TS_PSTART = 0x0800
TS_RESUME = 0x1000
TS_CREATE = 0x2000
TS_ALLSTART = \
(TS_CSTART|TS_UNPAUSE|TS_XSTART|TS_PSTART|TS_RESUME|TS_CREATE)
def CPR_VSTOPPED(t): return \
def THREAD_TRANSITION(tp): return thread_transition(tp);
def THREAD_STOP(tp): return \
def THREAD_ZOMB(tp): return THREAD_SET_STATE(tp, TS_ZOMB, NULL)
def SEMA_HELD(x): return (sema_held((x)))
NO_LOCKS_HELD = 1
NO_COMPETING_THREADS = 1
# Included from sys/cred.h
# Included from sys/uio.h
from TYPES import *
# Included from sys/resource.h
from TYPES import *
PRIO_PROCESS = 0
PRIO_PGRP = 1
PRIO_USER = 2
RLIMIT_CPU = 0
RLIMIT_FSIZE = 1
RLIMIT_DATA = 2
RLIMIT_STACK = 3
RLIMIT_CORE = 4
RLIMIT_NOFILE = 5
RLIMIT_VMEM = 6
RLIMIT_AS = RLIMIT_VMEM
RLIM_NLIMITS = 7
RLIM_INFINITY = (-3)
RLIM_SAVED_MAX = (-2)
RLIM_SAVED_CUR = (-1)
RLIM_INFINITY = 0x7fffffff
RLIM_SAVED_MAX = 0x7ffffffe
RLIM_SAVED_CUR = 0x7ffffffd
RLIM32_INFINITY = 0x7fffffff
RLIM32_SAVED_MAX = 0x7ffffffe
RLIM32_SAVED_CUR = 0x7ffffffd
# Included from sys/model.h
# Included from sys/debug.h
def ASSERT64(x): return ASSERT(x)
def ASSERT32(x): return ASSERT(x)
DATAMODEL_MASK = 0x0FF00000
DATAMODEL_ILP32 = 0x00100000
DATAMODEL_LP64 = 0x00200000
DATAMODEL_NONE = 0
DATAMODEL_NATIVE = DATAMODEL_LP64
DATAMODEL_NATIVE = DATAMODEL_ILP32
def STRUCT_SIZE(handle): return \
def STRUCT_BUF(handle): return ((handle).ptr.m64)
def SIZEOF_PTR(umodel): return \
def STRUCT_SIZE(handle): return (sizeof (*(handle).ptr))
def STRUCT_BUF(handle): return ((handle).ptr)
def SIZEOF_PTR(umodel): return sizeof (caddr_t)
def lwp_getdatamodel(t): return DATAMODEL_ILP32
RUSAGE_SELF = 0
RUSAGE_CHILDREN = -1
# Included from vm/seg_enum.h
# Included from sys/buf.h
# Included from sys/kstat.h
from TYPES import *
KSTAT_STRLEN = 31
def KSTAT_ENTER(k): return \
def KSTAT_EXIT(k): return \
KSTAT_TYPE_RAW = 0
KSTAT_TYPE_NAMED = 1
KSTAT_TYPE_INTR = 2
KSTAT_TYPE_IO = 3
KSTAT_TYPE_TIMER = 4
KSTAT_NUM_TYPES = 5
KSTAT_FLAG_VIRTUAL = 0x01
KSTAT_FLAG_VAR_SIZE = 0x02
KSTAT_FLAG_WRITABLE = 0x04
KSTAT_FLAG_PERSISTENT = 0x08
KSTAT_FLAG_DORMANT = 0x10
KSTAT_FLAG_INVALID = 0x20
KSTAT_READ = 0
KSTAT_WRITE = 1
KSTAT_DATA_CHAR = 0
KSTAT_DATA_INT32 = 1
KSTAT_DATA_UINT32 = 2
KSTAT_DATA_INT64 = 3
KSTAT_DATA_UINT64 = 4
KSTAT_DATA_LONG = KSTAT_DATA_INT32
KSTAT_DATA_ULONG = KSTAT_DATA_UINT32
KSTAT_DATA_LONG = KSTAT_DATA_INT64
KSTAT_DATA_ULONG = KSTAT_DATA_UINT64
KSTAT_DATA_LONG = 7
KSTAT_DATA_ULONG = 8
KSTAT_DATA_LONGLONG = KSTAT_DATA_INT64
KSTAT_DATA_ULONGLONG = KSTAT_DATA_UINT64
KSTAT_DATA_FLOAT = 5
KSTAT_DATA_DOUBLE = 6
KSTAT_INTR_HARD = 0
KSTAT_INTR_SOFT = 1
KSTAT_INTR_WATCHDOG = 2
KSTAT_INTR_SPURIOUS = 3
KSTAT_INTR_MULTSVC = 4
KSTAT_NUM_INTRS = 5
B_BUSY = 0x0001
B_DONE = 0x0002
B_ERROR = 0x0004
B_PAGEIO = 0x0010
B_PHYS = 0x0020
B_READ = 0x0040
B_WRITE = 0x0100
B_KERNBUF = 0x0008
B_WANTED = 0x0080
B_AGE = 0x000200
B_ASYNC = 0x000400
B_DELWRI = 0x000800
B_STALE = 0x001000
B_DONTNEED = 0x002000
B_REMAPPED = 0x004000
B_FREE = 0x008000
B_INVAL = 0x010000
B_FORCE = 0x020000
B_HEAD = 0x040000
B_NOCACHE = 0x080000
B_TRUNC = 0x100000
B_SHADOW = 0x200000
B_RETRYWRI = 0x400000
def notavail(bp): return \
def BWRITE(bp): return \
def BWRITE2(bp): return \
VROOT = 0x01
VNOCACHE = 0x02
VNOMAP = 0x04
VDUP = 0x08
VNOSWAP = 0x10
VNOMOUNT = 0x20
VISSWAP = 0x40
VSWAPLIKE = 0x80
VVFSLOCK = 0x100
VVFSWAIT = 0x200
VVMLOCK = 0x400
VDIROPEN = 0x800
VVMEXEC = 0x1000
VPXFS = 0x2000
AT_TYPE = 0x0001
AT_MODE = 0x0002
AT_UID = 0x0004
AT_GID = 0x0008
AT_FSID = 0x0010
AT_NODEID = 0x0020
AT_NLINK = 0x0040
AT_SIZE = 0x0080
AT_ATIME = 0x0100
AT_MTIME = 0x0200
AT_CTIME = 0x0400
AT_RDEV = 0x0800
AT_BLKSIZE = 0x1000
AT_NBLOCKS = 0x2000
AT_VCODE = 0x4000
AT_ALL = (AT_TYPE|AT_MODE|AT_UID|AT_GID|AT_FSID|AT_NODEID|\
AT_NLINK|AT_SIZE|AT_ATIME|AT_MTIME|AT_CTIME|\
AT_RDEV|AT_BLKSIZE|AT_NBLOCKS|AT_VCODE)
AT_STAT = (AT_MODE|AT_UID|AT_GID|AT_FSID|AT_NODEID|AT_NLINK|\
AT_SIZE|AT_ATIME|AT_MTIME|AT_CTIME|AT_RDEV)
AT_TIMES = (AT_ATIME|AT_MTIME|AT_CTIME)
AT_NOSET = (AT_NLINK|AT_RDEV|AT_FSID|AT_NODEID|AT_TYPE|\
AT_BLKSIZE|AT_NBLOCKS|AT_VCODE)
VSUID = 0o4000
VSGID = 0o2000
VSVTX = 0o1000
VREAD = 0o0400
VWRITE = 0o0200
VEXEC = 0o0100
MODEMASK = 0o7777
PERMMASK = 0o0777
def MANDMODE(mode): return (((mode) & (VSGID|(VEXEC>>3))) == VSGID)
VSA_ACL = 0x0001
VSA_ACLCNT = 0x0002
VSA_DFACL = 0x0004
VSA_DFACLCNT = 0x0008
LOOKUP_DIR = 0x01
DUMP_ALLOC = 0
DUMP_FREE = 1
DUMP_SCAN = 2
ATTR_UTIME = 0x01
ATTR_EXEC = 0x02
ATTR_COMM = 0x04
ATTR_HINT = 0x08
ATTR_REAL = 0x10
# Included from sys/poll.h
POLLIN = 0x0001
POLLPRI = 0x0002
POLLOUT = 0x0004
POLLRDNORM = 0x0040
POLLWRNORM = POLLOUT
POLLRDBAND = 0x0080
POLLWRBAND = 0x0100
POLLNORM = POLLRDNORM
POLLERR = 0x0008
POLLHUP = 0x0010
POLLNVAL = 0x0020
POLLREMOVE = 0x0800
POLLRDDATA = 0x0200
POLLNOERR = 0x0400
POLLCLOSED = 0x8000
# Included from sys/strmdep.h
def str_aligned(X): return (((ulong_t)(X) & (sizeof (int) - 1)) == 0)
# Included from sys/strft.h
tdelta_t_sz = 12
FTEV_MASK = 0x1FFF
FTEV_ISWR = 0x8000
FTEV_CS = 0x4000
FTEV_PS = 0x2000
FTEV_QMASK = 0x1F00
FTEV_ALLOCMASK = 0x1FF8
FTEV_ALLOCB = 0x0000
FTEV_ESBALLOC = 0x0001
FTEV_DESBALLOC = 0x0002
FTEV_ESBALLOCA = 0x0003
FTEV_DESBALLOCA = 0x0004
FTEV_ALLOCBIG = 0x0005
FTEV_ALLOCBW = 0x0006
FTEV_FREEB = 0x0008
FTEV_DUPB = 0x0009
FTEV_COPYB = 0x000A
FTEV_CALLER = 0x000F
FTEV_PUT = 0x0100
FTEV_FSYNCQ = 0x0103
FTEV_DSYNCQ = 0x0104
FTEV_PUTQ = 0x0105
FTEV_GETQ = 0x0106
FTEV_RMVQ = 0x0107
FTEV_INSQ = 0x0108
FTEV_PUTBQ = 0x0109
FTEV_FLUSHQ = 0x010A
FTEV_REPLYQ = 0x010B
FTEV_PUTNEXT = 0x010D
FTEV_RWNEXT = 0x010E
FTEV_QWINNER = 0x010F
FTEV_GEWRITE = 0x0101
def FTFLW_HASH(h): return (((unsigned)(h))%ftflw_hash_sz)
FTBLK_EVNTS = 0x9
QENAB = 0x00000001
QWANTR = 0x00000002
QWANTW = 0x00000004
QFULL = 0x00000008
QREADR = 0x00000010
QUSE = 0x00000020
QNOENB = 0x00000040
QBACK = 0x00000100
QHLIST = 0x00000200
QPAIR = 0x00000800
QPERQ = 0x00001000
QPERMOD = 0x00002000
QMTSAFE = 0x00004000
QMTOUTPERIM = 0x00008000
QMT_TYPEMASK = (QPAIR|QPERQ|QPERMOD|QMTSAFE|QMTOUTPERIM)
QINSERVICE = 0x00010000
QWCLOSE = 0x00020000
QEND = 0x00040000
QWANTWSYNC = 0x00080000
QSYNCSTR = 0x00100000
QISDRV = 0x00200000
QHOT = 0x00400000
QNEXTHOT = 0x00800000
_QINSERTING = 0x04000000
_QREMOVING = 0x08000000
Q_SQQUEUED = 0x01
Q_SQDRAINING = 0x02
QB_FULL = 0x01
QB_WANTW = 0x02
QB_BACK = 0x04
NBAND = 256
STRUIOT_NONE = -1
STRUIOT_DONTCARE = 0
STRUIOT_STANDARD = 1
STRUIOT_IP = 2
DBLK_REFMIN = 0x01
STRUIO_SPEC = 0x01
STRUIO_DONE = 0x02
STRUIO_IP = 0x04
STRUIO_ZC = 0x08
STRUIO_ICK = 0x10
MSGMARK = 0x01
MSGNOLOOP = 0x02
MSGDELIM = 0x04
MSGNOGET = 0x08
MSGMARKNEXT = 0x10
MSGNOTMARKNEXT = 0x20
M_DATA = 0x00
M_PROTO = 0x01
M_BREAK = 0x08
M_PASSFP = 0x09
M_EVENT = 0x0a
M_SIG = 0x0b
M_DELAY = 0x0c
M_CTL = 0x0d
M_IOCTL = 0x0e
M_SETOPTS = 0x10
M_RSE = 0x11
M_IOCACK = 0x81
M_IOCNAK = 0x82
M_PCPROTO = 0x83
M_PCSIG = 0x84
M_READ = 0x85
M_FLUSH = 0x86
M_STOP = 0x87
M_START = 0x88
M_HANGUP = 0x89
M_ERROR = 0x8a
M_COPYIN = 0x8b
M_COPYOUT = 0x8c
M_IOCDATA = 0x8d
M_PCRSE = 0x8e
M_STOPI = 0x8f
M_STARTI = 0x90
M_PCEVENT = 0x91
M_UNHANGUP = 0x92
QNORM = 0x00
QPCTL = 0x80
IOC_MODELS = DATAMODEL_MASK
IOC_ILP32 = DATAMODEL_ILP32
IOC_LP64 = DATAMODEL_LP64
IOC_NATIVE = DATAMODEL_NATIVE
IOC_NONE = DATAMODEL_NONE
STRCANON = 0x01
RECOPY = 0x02
SO_ALL = 0x003f
SO_READOPT = 0x0001
SO_WROFF = 0x0002
SO_MINPSZ = 0x0004
SO_MAXPSZ = 0x0008
SO_HIWAT = 0x0010
SO_LOWAT = 0x0020
SO_MREADON = 0x0040
SO_MREADOFF = 0x0080
SO_NDELON = 0x0100
SO_NDELOFF = 0x0200
SO_ISTTY = 0x0400
SO_ISNTTY = 0x0800
SO_TOSTOP = 0x1000
SO_TONSTOP = 0x2000
SO_BAND = 0x4000
SO_DELIM = 0x8000
SO_NODELIM = 0x010000
SO_STRHOLD = 0x020000
SO_ERROPT = 0x040000
SO_COPYOPT = 0x080000
SO_MAXBLK = 0x100000
DEF_IOV_MAX = 16
INFOD_FIRSTBYTES = 0x02
INFOD_BYTES = 0x04
INFOD_COUNT = 0x08
INFOD_COPYOUT = 0x10
MODOPEN = 0x1
CLONEOPEN = 0x2
CONSOPEN = 0x4
OPENFAIL = -1
BPRI_LO = 1
BPRI_MED = 2
BPRI_HI = 3
BPRI_FT = 4
INFPSZ = -1
FLUSHALL = 1
FLUSHDATA = 0
STRHIGH = 5120
STRLOW = 1024
MAXIOCBSZ = 1024
PERIM_INNER = 1
PERIM_OUTER = 2
def datamsg(type): return \
def straln(a): return (caddr_t)((intptr_t)(a) & ~(sizeof (int)-1))
# Included from sys/byteorder.h
def ntohl(x): return (x)
def ntohs(x): return (x)
def htonl(x): return (x)
def htons(x): return (x)
IPPROTO_IP = 0
IPPROTO_HOPOPTS = 0
IPPROTO_ICMP = 1
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_ENCAP = 4
IPPROTO_TCP = 6
IPPROTO_EGP = 8
IPPROTO_PUP = 12
IPPROTO_UDP = 17
IPPROTO_IDP = 22
IPPROTO_IPV6 = 41
IPPROTO_ROUTING = 43
IPPROTO_FRAGMENT = 44
IPPROTO_RSVP = 46
IPPROTO_ESP = 50
IPPROTO_AH = 51
IPPROTO_ICMPV6 = 58
IPPROTO_NONE = 59
IPPROTO_DSTOPTS = 60
IPPROTO_HELLO = 63
IPPROTO_ND = 77
IPPROTO_EON = 80
IPPROTO_PIM = 103
IPPROTO_RAW = 255
IPPROTO_MAX = 256
IPPORT_ECHO = 7
IPPORT_DISCARD = 9
IPPORT_SYSTAT = 11
IPPORT_DAYTIME = 13
IPPORT_NETSTAT = 15
IPPORT_FTP = 21
IPPORT_TELNET = 23
IPPORT_SMTP = 25
IPPORT_TIMESERVER = 37
IPPORT_NAMESERVER = 42
IPPORT_WHOIS = 43
IPPORT_MTP = 57
IPPORT_BOOTPS = 67
IPPORT_BOOTPC = 68
IPPORT_TFTP = 69
IPPORT_RJE = 77
IPPORT_FINGER = 79
IPPORT_TTYLINK = 87
IPPORT_SUPDUP = 95
IPPORT_EXECSERVER = 512
IPPORT_LOGINSERVER = 513
IPPORT_CMDSERVER = 514
IPPORT_EFSSERVER = 520
IPPORT_BIFFUDP = 512
IPPORT_WHOSERVER = 513
IPPORT_ROUTESERVER = 520
IPPORT_RESERVED = 1024
IPPORT_USERRESERVED = 5000
IMPLINK_IP = 155
IMPLINK_LOWEXPER = 156
IMPLINK_HIGHEXPER = 158
IN_CLASSA_NSHIFT = 24
IN_CLASSA_MAX = 128
IN_CLASSB_NSHIFT = 16
IN_CLASSB_MAX = 65536
IN_CLASSC_NSHIFT = 8
IN_CLASSD_NSHIFT = 28
def IN_MULTICAST(i): return IN_CLASSD(i)
IN_LOOPBACKNET = 127
def IN_SET_LOOPBACK_ADDR(a): return \
def IN6_IS_ADDR_UNSPECIFIED(addr): return \
def IN6_IS_ADDR_LOOPBACK(addr): return \
def IN6_IS_ADDR_LOOPBACK(addr): return \
def IN6_IS_ADDR_MULTICAST(addr): return \
def IN6_IS_ADDR_MULTICAST(addr): return \
def IN6_IS_ADDR_LINKLOCAL(addr): return \
def IN6_IS_ADDR_LINKLOCAL(addr): return \
def IN6_IS_ADDR_SITELOCAL(addr): return \
def IN6_IS_ADDR_SITELOCAL(addr): return \
def IN6_IS_ADDR_V4MAPPED(addr): return \
def IN6_IS_ADDR_V4MAPPED(addr): return \
def IN6_IS_ADDR_V4MAPPED_ANY(addr): return \
def IN6_IS_ADDR_V4MAPPED_ANY(addr): return \
def IN6_IS_ADDR_V4COMPAT(addr): return \
def IN6_IS_ADDR_V4COMPAT(addr): return \
def IN6_IS_ADDR_MC_RESERVED(addr): return \
def IN6_IS_ADDR_MC_RESERVED(addr): return \
def IN6_IS_ADDR_MC_NODELOCAL(addr): return \
def IN6_IS_ADDR_MC_NODELOCAL(addr): return \
def IN6_IS_ADDR_MC_LINKLOCAL(addr): return \
def IN6_IS_ADDR_MC_LINKLOCAL(addr): return \
def IN6_IS_ADDR_MC_SITELOCAL(addr): return \
def IN6_IS_ADDR_MC_SITELOCAL(addr): return \
def IN6_IS_ADDR_MC_ORGLOCAL(addr): return \
def IN6_IS_ADDR_MC_ORGLOCAL(addr): return \
def IN6_IS_ADDR_MC_GLOBAL(addr): return \
def IN6_IS_ADDR_MC_GLOBAL(addr): return \
IP_OPTIONS = 1
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_RETOPTS = 8
IP_MULTICAST_IF = 0x10
IP_MULTICAST_TTL = 0x11
IP_MULTICAST_LOOP = 0x12
IP_ADD_MEMBERSHIP = 0x13
IP_DROP_MEMBERSHIP = 0x14
IP_SEC_OPT = 0x22
IPSEC_PREF_NEVER = 0x01
IPSEC_PREF_REQUIRED = 0x02
IPSEC_PREF_UNIQUE = 0x04
IP_ADD_PROXY_ADDR = 0x40
IP_BOUND_IF = 0x41
IP_UNSPEC_SRC = 0x42
IP_REUSEADDR = 0x104
IP_DONTROUTE = 0x105
IP_BROADCAST = 0x106
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IPV6_RTHDR_TYPE_0 = 0
IPV6_UNICAST_HOPS = 0x5
IPV6_MULTICAST_IF = 0x6
IPV6_MULTICAST_HOPS = 0x7
IPV6_MULTICAST_LOOP = 0x8
IPV6_JOIN_GROUP = 0x9
IPV6_LEAVE_GROUP = 0xa
IPV6_ADD_MEMBERSHIP = 0x9
IPV6_DROP_MEMBERSHIP = 0xa
IPV6_PKTINFO = 0xb
IPV6_HOPLIMIT = 0xc
IPV6_NEXTHOP = 0xd
IPV6_HOPOPTS = 0xe
IPV6_DSTOPTS = 0xf
IPV6_RTHDR = 0x10
IPV6_RTHDRDSTOPTS = 0x11
IPV6_RECVPKTINFO = 0x12
IPV6_RECVHOPLIMIT = 0x13
IPV6_RECVHOPOPTS = 0x14
IPV6_RECVDSTOPTS = 0x15
IPV6_RECVRTHDR = 0x16
IPV6_RECVRTHDRDSTOPTS = 0x17
IPV6_CHECKSUM = 0x18
IPV6_BOUND_IF = 0x41
IPV6_UNSPEC_SRC = 0x42
INET_ADDRSTRLEN = 16
INET6_ADDRSTRLEN = 46
IPV6_PAD1_OPT = 0
|
waseem18/oh-mainline
|
refs/heads/master
|
vendor/packages/twisted/twisted/web/test/test_wsgi.py
|
18
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.wsgi}.
"""
__metaclass__ = type
from sys import exc_info
from urllib import quote
from thread import get_ident
import StringIO, cStringIO, tempfile
from zope.interface.verify import verifyObject
from twisted.python.compat import set
from twisted.python.log import addObserver, removeObserver, err
from twisted.python.failure import Failure
from twisted.python.threadpool import ThreadPool
from twisted.internet.defer import Deferred, gatherResults
from twisted.internet import reactor
from twisted.internet.error import ConnectionLost
from twisted.trial.unittest import TestCase
from twisted.web import http
from twisted.web.resource import IResource, Resource
from twisted.web.server import Request, Site, version
from twisted.web.wsgi import WSGIResource
from twisted.web.test.test_web import DummyChannel
class SynchronousThreadPool:
"""
A single-threaded implementation of part of the L{ThreadPool} interface.
This implementation calls functions synchronously rather than running
them in a thread pool. It is used to make the tests which are not
directly for thread-related behavior deterministic.
"""
def callInThread(self, f, *a, **kw):
"""
Call C{f(*a, **kw)} in this thread rather than scheduling it to be
called in a thread.
"""
try:
f(*a, **kw)
except:
# callInThread doesn't let exceptions propagate to the caller.
# None is always returned and any exception raised gets logged
# later on.
err(None, "Callable passed to SynchronousThreadPool.callInThread failed")
class SynchronousReactorThreads:
"""
A single-threaded implementation of part of the L{IReactorThreads}
interface. This implementation assumes that it will only be invoked
from the reactor thread, so it calls functions synchronously rather than
trying to schedule them to run in the reactor thread. It is used in
conjunction with L{SynchronousThreadPool} to make the tests which are
not directly for thread-related behavior deterministic.
"""
def callFromThread(self, f, *a, **kw):
"""
Call C{f(*a, **kw)} in this thread which should also be the reactor
thread.
"""
f(*a, **kw)
class WSGIResourceTests(TestCase):
def setUp(self):
"""
Create a L{WSGIResource} with synchronous threading objects and a no-op
application object. This is useful for testing certain things about
the resource implementation which are unrelated to WSGI.
"""
self.resource = WSGIResource(
SynchronousReactorThreads(), SynchronousThreadPool(),
lambda environ, startResponse: None)
def test_interfaces(self):
"""
L{WSGIResource} implements L{IResource} and stops resource traversal.
"""
verifyObject(IResource, self.resource)
self.assertTrue(self.resource.isLeaf)
def test_unsupported(self):
"""
A L{WSGIResource} cannot have L{IResource} children. Its
C{getChildWithDefault} and C{putChild} methods raise L{RuntimeError}.
"""
self.assertRaises(
RuntimeError,
self.resource.getChildWithDefault,
"foo", Request(DummyChannel(), False))
self.assertRaises(
RuntimeError,
self.resource.putChild,
"foo", Resource())
class WSGITestsMixin:
"""
@ivar channelFactory: A no-argument callable which will be invoked to
create a new HTTP channel to associate with request objects.
"""
channelFactory = DummyChannel
def setUp(self):
self.threadpool = SynchronousThreadPool()
self.reactor = SynchronousReactorThreads()
def lowLevelRender(
self, requestFactory, applicationFactory, channelFactory, method,
version, resourceSegments, requestSegments, query=None, headers=[],
body=None, safe=''):
"""
@param method: A C{str} giving the request method to use.
@param version: A C{str} like C{'1.1'} giving the request version.
@param resourceSegments: A C{list} of unencoded path segments which
specifies the location in the resource hierarchy at which the
L{WSGIResource} will be placed, eg C{['']} for I{/}, C{['foo',
'bar', '']} for I{/foo/bar/}, etc.
@param requestSegments: A C{list} of unencoded path segments giving the
request URI.
@param query: A C{list} of two-tuples of C{str} giving unencoded query
argument keys and values.
@param headers: A C{list} of two-tuples of C{str} giving request header
names and corresponding values.
@param safe: A C{str} giving the bytes which are to be considered
I{safe} for inclusion in the request URI and not quoted.
@return: A L{Deferred} which will be called back with a two-tuple of
the arguments passed which would be passed to the WSGI application
object for this configuration and request (ie, the environment and
start_response callable).
"""
root = WSGIResource(
self.reactor, self.threadpool, applicationFactory())
resourceSegments.reverse()
for seg in resourceSegments:
tmp = Resource()
tmp.putChild(seg, root)
root = tmp
channel = channelFactory()
channel.site = Site(root)
request = requestFactory(channel, False)
for k, v in headers:
request.requestHeaders.addRawHeader(k, v)
request.gotLength(0)
if body:
request.content.write(body)
request.content.seek(0)
uri = '/' + '/'.join([quote(seg, safe) for seg in requestSegments])
if query is not None:
uri += '?' + '&'.join(['='.join([quote(k, safe), quote(v, safe)])
for (k, v) in query])
request.requestReceived(method, uri, 'HTTP/' + version)
return request
def render(self, *a, **kw):
result = Deferred()
def applicationFactory():
def application(*args):
environ, startResponse = args
result.callback(args)
startResponse('200 OK', [])
return iter(())
return application
self.lowLevelRender(
Request, applicationFactory, self.channelFactory, *a, **kw)
return result
def requestFactoryFactory(self, requestClass=Request):
d = Deferred()
def requestFactory(*a, **kw):
request = requestClass(*a, **kw)
# If notifyFinish is called after lowLevelRender returns, it won't
# do the right thing, because the request will have already
# finished. One might argue that this is a bug in
# Request.notifyFinish.
request.notifyFinish().chainDeferred(d)
return request
return d, requestFactory
def getContentFromResponse(self, response):
return response.split('\r\n\r\n', 1)[1]
class EnvironTests(WSGITestsMixin, TestCase):
"""
Tests for the values in the C{environ} C{dict} passed to the application
object by L{twisted.web.wsgi.WSGIResource}.
"""
def environKeyEqual(self, key, value):
def assertEnvironKeyEqual((environ, startResponse)):
self.assertEqual(environ[key], value)
return assertEnvironKeyEqual
def test_environIsDict(self):
"""
L{WSGIResource} calls the application object with an C{environ}
parameter which is exactly of type C{dict}.
"""
d = self.render('GET', '1.1', [], [''])
def cbRendered((environ, startResponse)):
self.assertIdentical(type(environ), dict)
d.addCallback(cbRendered)
return d
def test_requestMethod(self):
"""
The C{'REQUEST_METHOD'} key of the C{environ} C{dict} passed to the
application contains the HTTP method in the request (RFC 3875, section
4.1.12).
"""
get = self.render('GET', '1.1', [], [''])
get.addCallback(self.environKeyEqual('REQUEST_METHOD', 'GET'))
# Also make sure a different request method shows up as a different
# value in the environ dict.
post = self.render('POST', '1.1', [], [''])
post.addCallback(self.environKeyEqual('REQUEST_METHOD', 'POST'))
return gatherResults([get, post])
def test_scriptName(self):
"""
The C{'SCRIPT_NAME'} key of the C{environ} C{dict} passed to the
application contains the I{abs_path} (RFC 2396, section 3) to this
resource (RFC 3875, section 4.1.13).
"""
root = self.render('GET', '1.1', [], [''])
root.addCallback(self.environKeyEqual('SCRIPT_NAME', ''))
emptyChild = self.render('GET', '1.1', [''], [''])
emptyChild.addCallback(self.environKeyEqual('SCRIPT_NAME', '/'))
leaf = self.render('GET', '1.1', ['foo'], ['foo'])
leaf.addCallback(self.environKeyEqual('SCRIPT_NAME', '/foo'))
container = self.render('GET', '1.1', ['foo', ''], ['foo', ''])
container.addCallback(self.environKeyEqual('SCRIPT_NAME', '/foo/'))
internal = self.render('GET', '1.1', ['foo'], ['foo', 'bar'])
internal.addCallback(self.environKeyEqual('SCRIPT_NAME', '/foo'))
unencoded = self.render(
'GET', '1.1', ['foo', '/', 'bar\xff'], ['foo', '/', 'bar\xff'])
# The RFC says "(not URL-encoded)", even though that makes
# interpretation of SCRIPT_NAME ambiguous.
unencoded.addCallback(
self.environKeyEqual('SCRIPT_NAME', '/foo///bar\xff'))
return gatherResults([
root, emptyChild, leaf, container, internal, unencoded])
def test_pathInfo(self):
"""
The C{'PATH_INFO'} key of the C{environ} C{dict} passed to the
application contains the suffix of the request URI path which is not
included in the value for the C{'SCRIPT_NAME'} key (RFC 3875, section
4.1.5).
"""
assertKeyEmpty = self.environKeyEqual('PATH_INFO', '')
root = self.render('GET', '1.1', [], [''])
root.addCallback(self.environKeyEqual('PATH_INFO', '/'))
emptyChild = self.render('GET', '1.1', [''], [''])
emptyChild.addCallback(assertKeyEmpty)
leaf = self.render('GET', '1.1', ['foo'], ['foo'])
leaf.addCallback(assertKeyEmpty)
container = self.render('GET', '1.1', ['foo', ''], ['foo', ''])
container.addCallback(assertKeyEmpty)
internalLeaf = self.render('GET', '1.1', ['foo'], ['foo', 'bar'])
internalLeaf.addCallback(self.environKeyEqual('PATH_INFO', '/bar'))
internalContainer = self.render('GET', '1.1', ['foo'], ['foo', ''])
internalContainer.addCallback(self.environKeyEqual('PATH_INFO', '/'))
unencoded = self.render('GET', '1.1', [], ['foo', '/', 'bar\xff'])
unencoded.addCallback(
self.environKeyEqual('PATH_INFO', '/foo///bar\xff'))
return gatherResults([
root, leaf, container, internalLeaf,
internalContainer, unencoded])
def test_queryString(self):
"""
The C{'QUERY_STRING'} key of the C{environ} C{dict} passed to the
application contains the portion of the request URI after the first
I{?} (RFC 3875, section 4.1.7).
"""
missing = self.render('GET', '1.1', [], [''], None)
missing.addCallback(self.environKeyEqual('QUERY_STRING', ''))
empty = self.render('GET', '1.1', [], [''], [])
empty.addCallback(self.environKeyEqual('QUERY_STRING', ''))
present = self.render('GET', '1.1', [], [''], [('foo', 'bar')])
present.addCallback(self.environKeyEqual('QUERY_STRING', 'foo=bar'))
unencoded = self.render('GET', '1.1', [], [''], [('/', '/')])
unencoded.addCallback(self.environKeyEqual('QUERY_STRING', '%2F=%2F'))
# "?" is reserved in the <searchpart> portion of a URL. However, it
# seems to be a common mistake of clients to forget to quote it. So,
# make sure we handle that invalid case.
doubleQuestion = self.render(
'GET', '1.1', [], [''], [('foo', '?bar')], safe='?')
doubleQuestion.addCallback(
self.environKeyEqual('QUERY_STRING', 'foo=?bar'))
return gatherResults([
missing, empty, present, unencoded, doubleQuestion])
def test_contentType(self):
"""
The C{'CONTENT_TYPE'} key of the C{environ} C{dict} passed to the
application contains the value of the I{Content-Type} request header
(RFC 3875, section 4.1.3).
"""
missing = self.render('GET', '1.1', [], [''])
missing.addCallback(self.environKeyEqual('CONTENT_TYPE', ''))
present = self.render(
'GET', '1.1', [], [''], None, [('content-type', 'x-foo/bar')])
present.addCallback(self.environKeyEqual('CONTENT_TYPE', 'x-foo/bar'))
return gatherResults([missing, present])
def test_contentLength(self):
"""
The C{'CONTENT_LENGTH'} key of the C{environ} C{dict} passed to the
application contains the value of the I{Content-Length} request header
(RFC 3875, section 4.1.2).
"""
missing = self.render('GET', '1.1', [], [''])
missing.addCallback(self.environKeyEqual('CONTENT_LENGTH', ''))
present = self.render(
'GET', '1.1', [], [''], None, [('content-length', '1234')])
present.addCallback(self.environKeyEqual('CONTENT_LENGTH', '1234'))
return gatherResults([missing, present])
def test_serverName(self):
"""
The C{'SERVER_NAME'} key of the C{environ} C{dict} passed to the
application contains the best determination of the server hostname
possible, using either the value of the I{Host} header in the request
or the address the server is listening on if that header is not
present (RFC 3875, section 4.1.14).
"""
missing = self.render('GET', '1.1', [], [''])
# 10.0.0.1 value comes from a bit far away -
# twisted.test.test_web.DummyChannel.transport.getHost().host
missing.addCallback(self.environKeyEqual('SERVER_NAME', '10.0.0.1'))
present = self.render(
'GET', '1.1', [], [''], None, [('host', 'example.org')])
present.addCallback(self.environKeyEqual('SERVER_NAME', 'example.org'))
return gatherResults([missing, present])
def test_serverPort(self):
"""
The C{'SERVER_PORT'} key of the C{environ} C{dict} passed to the
application contains the port number of the server which received the
request (RFC 3875, section 4.1.15).
"""
portNumber = 12354
def makeChannel():
channel = DummyChannel()
channel.transport = DummyChannel.TCP()
channel.transport.port = portNumber
return channel
self.channelFactory = makeChannel
d = self.render('GET', '1.1', [], [''])
d.addCallback(self.environKeyEqual('SERVER_PORT', str(portNumber)))
return d
def test_serverProtocol(self):
"""
The C{'SERVER_PROTOCOL'} key of the C{environ} C{dict} passed to the
application contains the HTTP version number received in the request
(RFC 3875, section 4.1.16).
"""
old = self.render('GET', '1.0', [], [''])
old.addCallback(self.environKeyEqual('SERVER_PROTOCOL', 'HTTP/1.0'))
new = self.render('GET', '1.1', [], [''])
new.addCallback(self.environKeyEqual('SERVER_PROTOCOL', 'HTTP/1.1'))
return gatherResults([old, new])
def test_remoteAddr(self):
"""
The C{'REMOTE_ADDR'} key of the C{environ} C{dict} passed to the
application contains the address of the client making the request.
"""
d = self.render('GET', '1.1', [], [''])
d.addCallback(self.environKeyEqual('REMOTE_ADDR', '192.168.1.1'))
return d
def test_headers(self):
"""
HTTP request headers are copied into the C{environ} C{dict} passed to
the application with a C{HTTP_} prefix added to their names.
"""
singleValue = self.render(
'GET', '1.1', [], [''], None, [('foo', 'bar'), ('baz', 'quux')])
def cbRendered((environ, startResponse)):
self.assertEqual(environ['HTTP_FOO'], 'bar')
self.assertEqual(environ['HTTP_BAZ'], 'quux')
singleValue.addCallback(cbRendered)
multiValue = self.render(
'GET', '1.1', [], [''], None, [('foo', 'bar'), ('foo', 'baz')])
multiValue.addCallback(self.environKeyEqual('HTTP_FOO', 'bar,baz'))
withHyphen = self.render(
'GET', '1.1', [], [''], None, [('foo-bar', 'baz')])
withHyphen.addCallback(self.environKeyEqual('HTTP_FOO_BAR', 'baz'))
multiLine = self.render(
'GET', '1.1', [], [''], None, [('foo', 'bar\n\tbaz')])
multiLine.addCallback(self.environKeyEqual('HTTP_FOO', 'bar \tbaz'))
return gatherResults([singleValue, multiValue, withHyphen, multiLine])
def test_wsgiVersion(self):
"""
The C{'wsgi.version'} key of the C{environ} C{dict} passed to the
application has the value C{(1, 0)} indicating that this is a WSGI 1.0
container.
"""
versionDeferred = self.render('GET', '1.1', [], [''])
versionDeferred.addCallback(self.environKeyEqual('wsgi.version', (1, 0)))
return versionDeferred
def test_wsgiRunOnce(self):
"""
The C{'wsgi.run_once'} key of the C{environ} C{dict} passed to the
application is set to C{False}.
"""
once = self.render('GET', '1.1', [], [''])
once.addCallback(self.environKeyEqual('wsgi.run_once', False))
return once
def test_wsgiMultithread(self):
"""
The C{'wsgi.multithread'} key of the C{environ} C{dict} passed to the
application is set to C{True}.
"""
thread = self.render('GET', '1.1', [], [''])
thread.addCallback(self.environKeyEqual('wsgi.multithread', True))
return thread
def test_wsgiMultiprocess(self):
"""
The C{'wsgi.multiprocess'} key of the C{environ} C{dict} passed to the
application is set to C{False}.
"""
process = self.render('GET', '1.1', [], [''])
process.addCallback(self.environKeyEqual('wsgi.multiprocess', False))
return process
def test_wsgiURLScheme(self):
"""
The C{'wsgi.url_scheme'} key of the C{environ} C{dict} passed to the
application has the request URL scheme.
"""
# XXX Does this need to be different if the request is for an absolute
# URL?
def channelFactory():
channel = DummyChannel()
channel.transport = DummyChannel.SSL()
return channel
self.channelFactory = DummyChannel
httpDeferred = self.render('GET', '1.1', [], [''])
httpDeferred.addCallback(self.environKeyEqual('wsgi.url_scheme', 'http'))
self.channelFactory = channelFactory
httpsDeferred = self.render('GET', '1.1', [], [''])
httpsDeferred.addCallback(self.environKeyEqual('wsgi.url_scheme', 'https'))
return gatherResults([httpDeferred, httpsDeferred])
def test_wsgiErrors(self):
"""
The C{'wsgi.errors'} key of the C{environ} C{dict} passed to the
application is a file-like object (as defined in the U{Input and Errors
Streams<http://www.python.org/dev/peps/pep-0333/#input-and-error-streams>}
section of PEP 333) which converts bytes written to it into events for
the logging system.
"""
events = []
addObserver(events.append)
self.addCleanup(removeObserver, events.append)
errors = self.render('GET', '1.1', [], [''])
def cbErrors((environ, startApplication)):
errors = environ['wsgi.errors']
errors.write('some message\n')
errors.writelines(['another\nmessage\n'])
errors.flush()
self.assertEqual(events[0]['message'], ('some message\n',))
self.assertEqual(events[0]['system'], 'wsgi')
self.assertTrue(events[0]['isError'])
self.assertEqual(events[1]['message'], ('another\nmessage\n',))
self.assertEqual(events[1]['system'], 'wsgi')
self.assertTrue(events[1]['isError'])
self.assertEqual(len(events), 2)
errors.addCallback(cbErrors)
return errors
class InputStreamTestMixin(WSGITestsMixin):
"""
A mixin for L{TestCase} subclasses which defines a number of tests against
L{_InputStream}. The subclass is expected to create a file-like object to
be wrapped by an L{_InputStream} under test.
"""
def getFileType(self):
        raise NotImplementedError(
            "%s.getFileType must be implemented" % (self.__class__.__name__,))
def _renderAndReturnReaderResult(self, reader, content):
contentType = self.getFileType()
class CustomizedRequest(Request):
def gotLength(self, length):
# Always allocate a file of the specified type, instead of
# using the base behavior of selecting one depending on the
# length.
self.content = contentType()
def appFactoryFactory(reader):
result = Deferred()
def applicationFactory():
def application(*args):
environ, startResponse = args
result.callback(reader(environ['wsgi.input']))
startResponse('200 OK', [])
return iter(())
return application
return result, applicationFactory
d, appFactory = appFactoryFactory(reader)
self.lowLevelRender(
CustomizedRequest, appFactory, DummyChannel,
'PUT', '1.1', [], [''], None, [],
content)
return d
def test_readAll(self):
"""
Calling L{_InputStream.read} with no arguments returns the entire input
stream.
"""
bytes = "some bytes are here"
d = self._renderAndReturnReaderResult(lambda input: input.read(), bytes)
d.addCallback(self.assertEquals, bytes)
return d
def test_readSome(self):
"""
Calling L{_InputStream.read} with an integer returns that many bytes
from the input stream, as long as it is less than or equal to the total
number of bytes available.
"""
bytes = "hello, world."
d = self._renderAndReturnReaderResult(lambda input: input.read(3), bytes)
d.addCallback(self.assertEquals, "hel")
return d
def test_readMoreThan(self):
"""
Calling L{_InputStream.read} with an integer that is greater than the
total number of bytes in the input stream returns all bytes in the
input stream.
"""
bytes = "some bytes are here"
d = self._renderAndReturnReaderResult(
lambda input: input.read(len(bytes) + 3), bytes)
d.addCallback(self.assertEquals, bytes)
return d
def test_readTwice(self):
"""
Calling L{_InputStream.read} a second time returns bytes starting from
the position after the last byte returned by the previous read.
"""
bytes = "some bytes, hello"
def read(input):
input.read(3)
return input.read()
d = self._renderAndReturnReaderResult(read, bytes)
d.addCallback(self.assertEquals, bytes[3:])
return d
def test_readNone(self):
"""
Calling L{_InputStream.read} with C{None} as an argument returns all
bytes in the input stream.
"""
bytes = "the entire stream"
d = self._renderAndReturnReaderResult(
lambda input: input.read(None), bytes)
d.addCallback(self.assertEquals, bytes)
return d
def test_readNegative(self):
"""
Calling L{_InputStream.read} with a negative integer as an argument
returns all bytes in the input stream.
"""
bytes = "all of the input"
d = self._renderAndReturnReaderResult(
lambda input: input.read(-1), bytes)
d.addCallback(self.assertEquals, bytes)
return d
def test_readline(self):
"""
Calling L{_InputStream.readline} with no argument returns one line from
the input stream.
"""
bytes = "hello\nworld"
d = self._renderAndReturnReaderResult(
lambda input: input.readline(), bytes)
d.addCallback(self.assertEquals, "hello\n")
return d
def test_readlineSome(self):
"""
Calling L{_InputStream.readline} with an integer returns at most that
many bytes, even if it is not enough to make up a complete line.
COMPATIBILITY NOTE: the size argument is excluded from the WSGI
specification, but is provided here anyhow, because useful libraries
such as python stdlib's cgi.py assume their input file-like-object
supports readline with a size argument. If you use it, be aware your
application may not be portable to other conformant WSGI servers.
"""
bytes = "goodbye\nworld"
d = self._renderAndReturnReaderResult(
lambda input: input.readline(3), bytes)
d.addCallback(self.assertEquals, "goo")
return d
def test_readlineMoreThan(self):
"""
Calling L{_InputStream.readline} with an integer which is greater than
the number of bytes in the next line returns only the next line.
"""
bytes = "some lines\nof text"
d = self._renderAndReturnReaderResult(
lambda input: input.readline(20), bytes)
d.addCallback(self.assertEquals, "some lines\n")
return d
def test_readlineTwice(self):
"""
Calling L{_InputStream.readline} a second time returns the line
following the line returned by the first call.
"""
bytes = "first line\nsecond line\nlast line"
def readline(input):
input.readline()
return input.readline()
d = self._renderAndReturnReaderResult(readline, bytes)
d.addCallback(self.assertEquals, "second line\n")
return d
def test_readlineNone(self):
"""
Calling L{_InputStream.readline} with C{None} as an argument returns
one line from the input stream.
"""
bytes = "this is one line\nthis is another line"
d = self._renderAndReturnReaderResult(
lambda input: input.readline(None), bytes)
d.addCallback(self.assertEquals, "this is one line\n")
return d
def test_readlineNegative(self):
"""
Calling L{_InputStream.readline} with a negative integer as an argument
returns one line from the input stream.
"""
bytes = "input stream line one\nline two"
d = self._renderAndReturnReaderResult(
lambda input: input.readline(-1), bytes)
d.addCallback(self.assertEquals, "input stream line one\n")
return d
def test_readlines(self):
"""
Calling L{_InputStream.readlines} with no arguments returns a list of
all lines from the input stream.
"""
bytes = "alice\nbob\ncarol"
d = self._renderAndReturnReaderResult(
lambda input: input.readlines(), bytes)
d.addCallback(self.assertEquals, ["alice\n", "bob\n", "carol"])
return d
def test_readlinesSome(self):
"""
Calling L{_InputStream.readlines} with an integer as an argument
returns a list of lines from the input stream with the argument serving
as an approximate bound on the total number of bytes to read.
"""
bytes = "123\n456\n789\n0"
d = self._renderAndReturnReaderResult(
lambda input: input.readlines(5), bytes)
def cbLines(lines):
# Make sure we got enough lines to make 5 bytes. Anything beyond
# that is fine too.
self.assertEquals(lines[:2], ["123\n", "456\n"])
d.addCallback(cbLines)
return d
def test_readlinesMoreThan(self):
"""
Calling L{_InputStream.readlines} with an integer which is greater than
the total number of bytes in the input stream returns a list of all
lines from the input.
"""
bytes = "one potato\ntwo potato\nthree potato"
d = self._renderAndReturnReaderResult(
lambda input: input.readlines(100), bytes)
d.addCallback(
self.assertEquals,
["one potato\n", "two potato\n", "three potato"])
return d
def test_readlinesAfterRead(self):
"""
Calling L{_InputStream.readlines} after a call to L{_InputStream.read}
returns lines starting at the byte after the last byte returned by the
C{read} call.
"""
bytes = "hello\nworld\nfoo"
def readlines(input):
input.read(7)
return input.readlines()
d = self._renderAndReturnReaderResult(readlines, bytes)
d.addCallback(self.assertEquals, ["orld\n", "foo"])
return d
def test_readlinesNone(self):
"""
Calling L{_InputStream.readlines} with C{None} as an argument returns
all lines from the input.
"""
bytes = "one fish\ntwo fish\n"
d = self._renderAndReturnReaderResult(
lambda input: input.readlines(None), bytes)
d.addCallback(self.assertEquals, ["one fish\n", "two fish\n"])
return d
def test_readlinesNegative(self):
"""
Calling L{_InputStream.readlines} with a negative integer as an
argument returns a list of all lines from the input.
"""
bytes = "red fish\nblue fish\n"
d = self._renderAndReturnReaderResult(
lambda input: input.readlines(-1), bytes)
d.addCallback(self.assertEquals, ["red fish\n", "blue fish\n"])
return d
def test_iterable(self):
"""
Iterating over L{_InputStream} produces lines from the input stream.
"""
bytes = "green eggs\nand ham\n"
d = self._renderAndReturnReaderResult(lambda input: list(input), bytes)
d.addCallback(self.assertEquals, ["green eggs\n", "and ham\n"])
return d
def test_iterableAfterRead(self):
"""
Iterating over L{_InputStream} after calling L{_InputStream.read}
produces lines from the input stream starting from the first byte after
the last byte returned by the C{read} call.
"""
bytes = "green eggs\nand ham\n"
def iterate(input):
input.read(3)
return list(input)
d = self._renderAndReturnReaderResult(iterate, bytes)
d.addCallback(self.assertEquals, ["en eggs\n", "and ham\n"])
return d
class InputStreamStringIOTests(InputStreamTestMixin, TestCase):
"""
Tests for L{_InputStream} when it is wrapped around a L{StringIO.StringIO}.
"""
def getFileType(self):
return StringIO.StringIO
class InputStreamCStringIOTests(InputStreamTestMixin, TestCase):
"""
Tests for L{_InputStream} when it is wrapped around a
L{cStringIO.StringIO}.
"""
def getFileType(self):
return cStringIO.StringIO
class InputStreamTemporaryFileTests(InputStreamTestMixin, TestCase):
"""
Tests for L{_InputStream} when it is wrapped around a L{tempfile.TemporaryFile}.
"""
def getFileType(self):
return tempfile.TemporaryFile
class StartResponseTests(WSGITestsMixin, TestCase):
"""
Tests for the I{start_response} parameter passed to the application object
by L{WSGIResource}.
"""
def test_status(self):
"""
The response status passed to the I{start_response} callable is written
as the status of the response to the request.
"""
channel = DummyChannel()
def applicationFactory():
def application(environ, startResponse):
startResponse('107 Strange message', [])
return iter(())
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertTrue(
channel.transport.written.getvalue().startswith(
'HTTP/1.1 107 Strange message'))
d.addCallback(cbRendered)
request = self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def _headersTest(self, appHeaders, expectedHeaders):
"""
Verify that if the response headers given by C{appHeaders} are passed
to the I{start_response} callable, then the response header lines given
by C{expectedHeaders} plus I{Server} and I{Date} header lines are
included in the response.
"""
# Make the Date header value deterministic
self.patch(http, 'datetimeToString', lambda: 'Tuesday')
channel = DummyChannel()
def applicationFactory():
def application(environ, startResponse):
startResponse('200 OK', appHeaders)
return iter(())
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
response = channel.transport.written.getvalue()
headers, rest = response.split('\r\n\r\n', 1)
headerLines = headers.split('\r\n')[1:]
headerLines.sort()
allExpectedHeaders = expectedHeaders + [
'Date: Tuesday',
'Server: ' + version,
'Transfer-Encoding: chunked']
allExpectedHeaders.sort()
self.assertEqual(headerLines, allExpectedHeaders)
d.addCallback(cbRendered)
request = self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def test_headers(self):
"""
The headers passed to the I{start_response} callable are included in
the response as are the required I{Date} and I{Server} headers and the
necessary connection (hop to hop) header I{Transfer-Encoding}.
"""
return self._headersTest(
[('foo', 'bar'), ('baz', 'quux')],
['Baz: quux', 'Foo: bar'])
def test_applicationProvidedContentType(self):
"""
If I{Content-Type} is included in the headers passed to the
I{start_response} callable, one I{Content-Type} header is included in
the response.
"""
return self._headersTest(
[('content-type', 'monkeys are great')],
['Content-Type: monkeys are great'])
def test_applicationProvidedServerAndDate(self):
"""
If either I{Server} or I{Date} is included in the headers passed to the
I{start_response} callable, they are disregarded.
"""
return self._headersTest(
[('server', 'foo'), ('Server', 'foo'),
('date', 'bar'), ('dATE', 'bar')],
[])
def test_delayedUntilReturn(self):
"""
Nothing is written in response to a request when the I{start_response}
callable is invoked. If the iterator returned by the application
object produces only empty strings, the response is written after the
last element is produced.
"""
channel = DummyChannel()
intermediateValues = []
def record():
intermediateValues.append(channel.transport.written.getvalue())
def applicationFactory():
def application(environ, startResponse):
startResponse('200 OK', [('foo', 'bar'), ('baz', 'quux')])
yield ''
record()
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertEqual(intermediateValues, [''])
d.addCallback(cbRendered)
request = self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def test_delayedUntilContent(self):
"""
Nothing is written in response to a request when the I{start_response}
callable is invoked. Once a non-empty string has been produced by the
iterator returned by the application object, the response status and
headers are written.
"""
channel = DummyChannel()
intermediateValues = []
def record():
intermediateValues.append(channel.transport.written.getvalue())
def applicationFactory():
def application(environ, startResponse):
startResponse('200 OK', [('foo', 'bar')])
yield ''
record()
yield 'foo'
record()
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertFalse(intermediateValues[0])
self.assertTrue(intermediateValues[1])
d.addCallback(cbRendered)
request = self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def test_content(self):
"""
Content produced by the iterator returned by the application object is
written to the request as it is produced.
"""
channel = DummyChannel()
intermediateValues = []
def record():
intermediateValues.append(channel.transport.written.getvalue())
def applicationFactory():
def application(environ, startResponse):
startResponse('200 OK', [('content-length', '6')])
yield 'foo'
record()
yield 'bar'
record()
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertEqual(
self.getContentFromResponse(intermediateValues[0]),
'foo')
self.assertEqual(
self.getContentFromResponse(intermediateValues[1]),
'foobar')
d.addCallback(cbRendered)
request = self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def test_multipleStartResponse(self):
"""
        If the I{start_response} callable is invoked multiple times before any
        data for the response body is produced, the values from the last call
are used.
"""
channel = DummyChannel()
def applicationFactory():
def application(environ, startResponse):
startResponse('100 Foo', [])
startResponse('200 Bar', [])
return iter(())
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertTrue(
channel.transport.written.getvalue().startswith(
'HTTP/1.1 200 Bar\r\n'))
d.addCallback(cbRendered)
request = self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def test_startResponseWithException(self):
"""
If the I{start_response} callable is invoked with a third positional
argument before the status and headers have been written to the
response, the status and headers become the newly supplied values.
"""
channel = DummyChannel()
def applicationFactory():
def application(environ, startResponse):
startResponse('100 Foo', [], (Exception, Exception("foo"), None))
return iter(())
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertTrue(
channel.transport.written.getvalue().startswith(
'HTTP/1.1 100 Foo\r\n'))
d.addCallback(cbRendered)
request = self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def test_startResponseWithExceptionTooLate(self):
"""
If the I{start_response} callable is invoked with a third positional
argument after the status and headers have been written to the
response, the supplied I{exc_info} values are re-raised to the
application.
"""
channel = DummyChannel()
class SomeException(Exception):
pass
try:
raise SomeException()
except:
excInfo = exc_info()
reraised = []
def applicationFactory():
def application(environ, startResponse):
startResponse('200 OK', [])
yield 'foo'
try:
startResponse('500 ERR', [], excInfo)
except:
reraised.append(exc_info())
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertTrue(
channel.transport.written.getvalue().startswith(
'HTTP/1.1 200 OK\r\n'))
self.assertEqual(reraised[0][0], excInfo[0])
self.assertEqual(reraised[0][1], excInfo[1])
self.assertEqual(reraised[0][2].tb_next, excInfo[2])
d.addCallback(cbRendered)
request = self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def test_write(self):
"""
I{start_response} returns the I{write} callable which can be used to
write bytes to the response body without buffering.
"""
channel = DummyChannel()
intermediateValues = []
def record():
intermediateValues.append(channel.transport.written.getvalue())
def applicationFactory():
def application(environ, startResponse):
write = startResponse('100 Foo', [('content-length', '6')])
write('foo')
record()
write('bar')
record()
return iter(())
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertEqual(
self.getContentFromResponse(intermediateValues[0]),
'foo')
self.assertEqual(
self.getContentFromResponse(intermediateValues[1]),
'foobar')
d.addCallback(cbRendered)
request = self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
class ApplicationTests(WSGITestsMixin, TestCase):
"""
Tests for things which are done to the application object and the iterator
it returns.
"""
def enableThreads(self):
self.reactor = reactor
self.threadpool = ThreadPool()
self.threadpool.start()
self.addCleanup(self.threadpool.stop)
def test_close(self):
"""
If the application object returns an iterator which also has a I{close}
method, that method is called after iteration is complete.
"""
channel = DummyChannel()
class Result:
def __init__(self):
self.open = True
def __iter__(self):
for i in range(3):
if self.open:
yield str(i)
def close(self):
self.open = False
result = Result()
def applicationFactory():
def application(environ, startResponse):
startResponse('200 OK', [('content-length', '3')])
return result
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertEqual(
self.getContentFromResponse(
channel.transport.written.getvalue()),
'012')
self.assertFalse(result.open)
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''])
return d
def test_applicationCalledInThread(self):
"""
The application object is invoked and iterated in a thread which is not
the reactor thread.
"""
self.enableThreads()
invoked = []
def applicationFactory():
def application(environ, startResponse):
def result():
for i in range(3):
invoked.append(get_ident())
yield str(i)
invoked.append(get_ident())
startResponse('200 OK', [('content-length', '3')])
return result()
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertNotIn(get_ident(), invoked)
self.assertEqual(len(set(invoked)), 1)
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory,
DummyChannel, 'GET', '1.1', [], [''])
return d
def test_writeCalledFromThread(self):
"""
The I{write} callable returned by I{start_response} calls the request's
C{write} method in the reactor thread.
"""
self.enableThreads()
invoked = []
class ThreadVerifier(Request):
def write(self, bytes):
invoked.append(get_ident())
return Request.write(self, bytes)
def applicationFactory():
def application(environ, startResponse):
write = startResponse('200 OK', [])
write('foo')
return iter(())
return application
d, requestFactory = self.requestFactoryFactory(ThreadVerifier)
def cbRendered(ignored):
self.assertEqual(set(invoked), set([get_ident()]))
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory, DummyChannel,
'GET', '1.1', [], [''])
return d
def test_iteratedValuesWrittenFromThread(self):
"""
Strings produced by the iterator returned by the application object are
written to the request in the reactor thread.
"""
self.enableThreads()
invoked = []
class ThreadVerifier(Request):
def write(self, bytes):
invoked.append(get_ident())
return Request.write(self, bytes)
def applicationFactory():
def application(environ, startResponse):
startResponse('200 OK', [])
yield 'foo'
return application
d, requestFactory = self.requestFactoryFactory(ThreadVerifier)
def cbRendered(ignored):
self.assertEqual(set(invoked), set([get_ident()]))
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory, DummyChannel,
'GET', '1.1', [], [''])
return d
def test_statusWrittenFromThread(self):
"""
The response status is set on the request object in the reactor thread.
"""
self.enableThreads()
invoked = []
class ThreadVerifier(Request):
def setResponseCode(self, code, message):
invoked.append(get_ident())
return Request.setResponseCode(self, code, message)
def applicationFactory():
def application(environ, startResponse):
startResponse('200 OK', [])
return iter(())
return application
d, requestFactory = self.requestFactoryFactory(ThreadVerifier)
def cbRendered(ignored):
self.assertEqual(set(invoked), set([get_ident()]))
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory, DummyChannel,
'GET', '1.1', [], [''])
return d
def test_connectionClosedDuringIteration(self):
"""
If the request connection is lost while the application object is being
iterated, iteration is stopped.
"""
class UnreliableConnection(Request):
"""
This is a request which pretends its connection is lost immediately
after the first write is done to it.
"""
def write(self, bytes):
self.connectionLost(Failure(ConnectionLost("No more connection")))
self.badIter = False
def appIter():
yield "foo"
self.badIter = True
raise Exception("Should not have gotten here")
def applicationFactory():
def application(environ, startResponse):
startResponse('200 OK', [])
return appIter()
return application
d, requestFactory = self.requestFactoryFactory(UnreliableConnection)
def cbRendered(ignored):
self.assertFalse(self.badIter, "Should not have resumed iteration")
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory, DummyChannel,
'GET', '1.1', [], [''])
return self.assertFailure(d, ConnectionLost)
def _internalServerErrorTest(self, application):
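        # Shared helper: render the given application and verify that exactly
        # one RuntimeError is logged and that a 500 Internal Server Error
        # status line is written to the transport.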
channel = DummyChannel()
def applicationFactory():
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
errors = self.flushLoggedErrors(RuntimeError)
self.assertEquals(len(errors), 1)
self.assertTrue(
channel.transport.written.getvalue().startswith(
'HTTP/1.1 500 Internal Server Error'))
d.addCallback(cbRendered)
request = self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def test_applicationExceptionBeforeStartResponse(self):
"""
If the application raises an exception before calling I{start_response}
then the response status is I{500} and the exception is logged.
"""
def application(environ, startResponse):
raise RuntimeError("This application had some error.")
return self._internalServerErrorTest(application)
def test_applicationExceptionAfterStartResponse(self):
"""
If the application calls I{start_response} but then raises an exception
before any data is written to the response then the response status is
I{500} and the exception is logged.
"""
def application(environ, startResponse):
startResponse('200 OK', [])
raise RuntimeError("This application had some error.")
return self._internalServerErrorTest(application)
def _connectionClosedTest(self, application, responseContent):
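        # Shared helper: render the given application, confirm the transport
        # was disconnected, fire connectionLost on the request, and verify that
        # one RuntimeError is logged and that the partial 200 response
        # (including responseContent) was written before the failure.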
channel = DummyChannel()
def applicationFactory():
return application
d, requestFactory = self.requestFactoryFactory()
# Capture the request so we can disconnect it later on.
requests = []
def requestFactoryWrapper(*a, **kw):
requests.append(requestFactory(*a, **kw))
return requests[-1]
def ebRendered(ignored):
errors = self.flushLoggedErrors(RuntimeError)
self.assertEquals(len(errors), 1)
response = channel.transport.written.getvalue()
self.assertTrue(response.startswith('HTTP/1.1 200 OK'))
# Chunked transfer-encoding makes this a little messy.
self.assertIn(responseContent, response)
d.addErrback(ebRendered)
request = self.lowLevelRender(
requestFactoryWrapper, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
# By now the connection should be closed.
self.assertTrue(channel.transport.disconnected)
# Give it a little push to go the rest of the way.
requests[0].connectionLost(Failure(ConnectionLost("All gone")))
return d
def test_applicationExceptionAfterWrite(self):
"""
If the application raises an exception after the response status has
already been sent then the connection is closed and the exception is
logged.
"""
responseContent = (
'Some bytes, triggering the server to start sending the response')
def application(environ, startResponse):
startResponse('200 OK', [])
yield responseContent
raise RuntimeError("This application had some error.")
return self._connectionClosedTest(application, responseContent)
def test_applicationCloseException(self):
"""
If the application returns a closeable iterator and the C{close} method
raises an exception when called then the connection is still closed and
the exception is logged.
"""
responseContent = 'foo'
class Application(object):
def __init__(self, environ, startResponse):
startResponse('200 OK', [])
def __iter__(self):
yield responseContent
def close(self):
raise RuntimeError("This application had some error.")
return self._connectionClosedTest(Application, responseContent)
|
romanz/trezor-agent
|
refs/heads/master
|
agents/trezor/trezor_agent.py
|
1
|
from libagent import signify, gpg, ssh
from libagent.device.trezor import Trezor as DeviceType
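# Thin wrappers binding libagent's ssh/gpg/signify entry points to the Trezor
# device class; these names are presumably exposed as the package's
# command-line entry points.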
ssh_agent = lambda: ssh.main(DeviceType)
gpg_tool = lambda: gpg.main(DeviceType)
gpg_agent = lambda: gpg.run_agent(DeviceType)
signify_tool = lambda: signify.main(DeviceType)
|
dgzurita/odoo
|
refs/heads/8.0
|
addons/base_gengo/__openerp__.py
|
312
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Automated Translations through Gengo API',
'version': '0.1',
'category': 'Tools',
'description': """
Automated Translations through Gengo API
========================================
This module will install a passive scheduler job for automated translations
using the Gengo API. To activate it, you must:
1) Configure your Gengo authentication parameters under `Settings > Companies > Gengo Parameters`
2) Launch the wizard under `Settings > Application Terms > Gengo: Manual Request of Translation` and follow its steps.
This wizard will activate the cron job and the scheduler, and will start the automatic translation via Gengo Services for all the terms for which you requested it.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com',
'depends': ['base'],
'data': [
'gengo_sync_schedular_data.xml',
'ir_translation.xml',
'res_company_view.xml',
'wizard/base_gengo_translations_view.xml',
],
'demo': [],
'test': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
geary/claslite
|
refs/heads/master
|
web/app/lib/antlr3/compat.py
|
150
|
"""Compatibility stuff"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
try:
set = set
frozenset = frozenset
except NameError:
from sets import Set as set, ImmutableSet as frozenset
try:
reversed = reversed
except NameError:
def reversed(l):
l = l[:]
l.reverse()
return l
|
Yannig/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/panos/panos_dag.py
|
29
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_dag
short_description: create a dynamic address group
description:
- Create a dynamic address group object in the firewall used for policy rules
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device
required: true
default: null
password:
description:
- password for authentication
required: true
default: null
username:
description:
- username for authentication
required: false
default: "admin"
dag_name:
description:
- name of the dynamic address group
required: true
default: null
dag_filter:
description:
            - dynamic filter used by the dynamic address group
required: true
default: null
commit:
description:
- commit if changed
required: false
default: true
'''
EXAMPLES = '''
- name: dag
panos_dag:
ip_address: "192.168.1.1"
password: "admin"
dag_name: "dag-1"
dag_filter: "'aws-tag.aws:cloudformation:logical-id.ServerInstance' and 'instanceState.running'"
'''
RETURN = '''
# Default return values
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
try:
import pan.xapi
HAS_LIB = True
except ImportError:
HAS_LIB = False
_ADDRGROUP_XPATH = "/config/devices/entry[@name='localhost.localdomain']" +\
"/vsys/entry[@name='vsys1']/address-group/entry[@name='%s']"
def addressgroup_exists(xapi, group_name):
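    # Return True if an address-group entry with the given name already exists
    # on the device, looked up via the vsys1 address-group XPath above.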
xapi.get(_ADDRGROUP_XPATH % group_name)
e = xapi.element_root.find('.//entry')
if e is None:
return False
return True
def add_dag(xapi, dag_name, dag_filter):
if addressgroup_exists(xapi, dag_name):
return False
    # build the <dynamic> element holding the address group's match filter
exml = []
exml.append('<dynamic>')
exml.append('<filter>%s</filter>' % dag_filter)
exml.append('</dynamic>')
exml = ''.join(exml)
xapi.set(xpath=_ADDRGROUP_XPATH % dag_name, element=exml)
return True
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(required=True, no_log=True),
username=dict(default='admin'),
dag_name=dict(required=True),
dag_filter=dict(required=True),
commit=dict(type='bool', default=True)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_LIB:
module.fail_json(msg='pan-python is required for this module')
ip_address = module.params["ip_address"]
password = module.params["password"]
username = module.params['username']
xapi = pan.xapi.PanXapi(
hostname=ip_address,
api_username=username,
api_password=password
)
dag_name = module.params['dag_name']
dag_filter = module.params['dag_filter']
commit = module.params['commit']
changed = add_dag(xapi, dag_name, dag_filter)
if changed and commit:
xapi.commit(cmd="<commit></commit>", sync=True, interval=1)
module.exit_json(changed=changed, msg="okey dokey")
if __name__ == '__main__':
main()
|
sorenmalling/skype4py
|
refs/heads/master
|
Skype4Py/lang/fi.py
|
23
|
apiAttachAvailable = u'API saatavilla'
apiAttachNotAvailable = u'Ei saatavilla'
apiAttachPendingAuthorization = u'Odottaa valtuutusta'
apiAttachRefused = u'Ev\xe4tty'
apiAttachSuccess = u'Onnistui'
apiAttachUnknown = u'Tuntematon'
budDeletedFriend = u'Poistettu yst\xe4v\xe4listasta'
budFriend = u'Yst\xe4v\xe4'
budNeverBeenFriend = u'Ei aiempia yst\xe4v\xe4listauksia'
budPendingAuthorization = u'Odottaa valtuutusta'
budUnknown = u'Tuntematon'
cfrBlockedByRecipient = u'Vastaanottaja esti soiton'
cfrMiscError = u'Sekal virhe'
cfrNoCommonCodec = u'Ei yleinen koodekki'
cfrNoProxyFound = u'Proxy ei l\xf6ytynyt'
cfrNotAuthorizedByRecipient = u'K\xe4ytt\xe4j\xe4ll\xe4 ei ole vastaanottajan hyv\xe4ksynt\xe4\xe4'
cfrRecipientNotFriend = u'Vastaanottaja ei ole yst\xe4v\xe4'
cfrRemoteDeviceError = u'Ongelma et\xe4-\xe4\xe4nilaitteessa'
cfrSessionTerminated = u'Istunto p\xe4\xe4ttyi'
cfrSoundIOError = u'\xc4\xe4nen I/O-virhe'
cfrSoundRecordingError = u'\xc4\xe4nentallennusvirhe'
cfrUnknown = u'Tuntematon'
cfrUserDoesNotExist = u'Tuntematon k\xe4ytt\xe4j\xe4/puhelinnumero'
cfrUserIsOffline = u'H\xe4n on Offline-tilassa'
chsAllCalls = u'Vanha dialogi'
chsDialog = u'Dialogi'
chsIncomingCalls = u'Multi-chat odottaa hyv\xe4ksynt\xf6j\xe4'
chsLegacyDialog = u'Vanha dialogi'
chsMissedCalls = u'Dialogi'
chsMultiNeedAccept = u'Multi-chat odottaa hyv\xe4ksynt\xf6j\xe4'
chsMultiSubscribed = u'Multi tilattu'
chsOutgoingCalls = u'Multi tilattu'
chsUnknown = u'Tuntematon'
chsUnsubscribed = u'Ei tilaaja'
clsBusy = u'Varattu'
clsCancelled = u'Peruutettu'
clsEarlyMedia = u'K\xe4sittelee ennakkomediaa (Early Media)'
clsFailed = u'Ik\xe4v\xe4 kyll\xe4, puhelu ep\xe4onnistui!'
clsFinished = u'Valmis'
clsInProgress = u'Puhelu k\xe4ynniss\xe4'
clsLocalHold = u'Paikalliseti pidossa'
clsMissed = u'vastaamaton puhelu'
clsOnHold = u'Pidossa'
clsRefused = u'Ev\xe4tty'
clsRemoteHold = u'Et\xe4pidossa'
clsRinging = u'soittamassa'
clsRouting = u'Reitittt\xe4\xe4'
clsTransferred = u'Tuntematon'
clsTransferring = u'Tuntematon'
clsUnknown = u'Tuntematon'
clsUnplaced = u'Ei koskaan valittu'
clsVoicemailBufferingGreeting = u'Puskuroi tervehdyst\xe4'
clsVoicemailCancelled = u'Puheposti on peruutettu'
clsVoicemailFailed = u'Puheposti ep\xe4onnistui'
clsVoicemailPlayingGreeting = u'Toistaa tervehdyst\xe4'
clsVoicemailRecording = u'Puhepostin \xe4\xe4nitys'
clsVoicemailSent = u'Puheposti on l\xe4hetetty'
clsVoicemailUploading = u'Lataa puhepostia'
cltIncomingP2P = u'Saapuva vertaissoitto'
cltIncomingPSTN = u'Saapuva puhelinsoitto'
cltOutgoingP2P = u'L\xe4htev\xe4 vertaissoitto'
cltOutgoingPSTN = u'L\xe4htev\xe4 puhelinsoitto'
cltUnknown = u'Tuntematon'
cmeAddedMembers = u'Lis\xe4tty j\xe4senet'
cmeCreatedChatWith = u'Luotu chat-yhteys'
cmeEmoted = u'Tuntematon'
cmeLeft = u'Poistunut'
cmeSaid = u'Sanottu'
cmeSawMembers = u'N\xe4hty j\xe4senet'
cmeSetTopic = u'Aseta aihe'
cmeUnknown = u'Tuntematon'
cmsRead = u'Lue'
cmsReceived = u'Vastaanotettu'
cmsSending = u'L\xe4hetet\xe4\xe4n...'
cmsSent = u'L\xe4hetetty'
cmsUnknown = u'Tuntematon'
conConnecting = u'Yhdistet\xe4\xe4n'
conOffline = u'Offline-tila'
conOnline = u'Online-tila'
conPausing = u'Tauko'
conUnknown = u'Tuntematon'
cusAway = u'Poistunut'
cusDoNotDisturb = u'\xc4l\xe4 h\xe4iritse'
cusInvisible = u'Huomaamaton'
cusLoggedOut = u'Offline-tila'
cusNotAvailable = u'Ei saatavilla'
cusOffline = u'Offline-tila'
cusOnline = u'Online-tila'
cusSkypeMe = u'Soita minulle'
cusUnknown = u'Tuntematon'
cvsBothEnabled = u'Videon l\xe4hetys ja vastaanotto'
cvsNone = u'Ei videota'
cvsReceiveEnabled = u'Videon vastaanotto'
cvsSendEnabled = u'Videon l\xe4hetys'
cvsUnknown = u''
grpAllFriends = u'Kaikki yst\xe4v\xe4t'
grpAllUsers = u'Kaikki k\xe4ytt\xe4j\xe4t'
grpCustomGroup = u'R\xe4\xe4t\xe4l\xf6ity'
grpOnlineFriends = u'Netiss\xe4 olevat yst\xe4v\xe4t'
grpPendingAuthorizationFriends = u'Odottaa valtuutusta'
grpProposedSharedGroup = u'Proposed Shared Group'
grpRecentlyContactedUsers = u'\xc4skett\xe4iset yhteydet'
grpSharedGroup = u'Shared Group'
grpSkypeFriends = u'Skype-yst\xe4v\xe4t'
grpSkypeOutFriends = u'SkypeOut-yst\xe4v\xe4t'
grpUngroupedFriends = u'Ryhmitt\xe4m\xe4tt\xf6m\xe4t yst\xe4v\xe4ni'
grpUnknown = u'Tuntematon'
grpUsersAuthorizedByMe = u'Valtuutin'
grpUsersBlockedByMe = u'Estin'
grpUsersWaitingMyAuthorization = u'Odottaa valtuutustani'
leaAddDeclined = u'Lis\xe4ys torjuttu'
leaAddedNotAuthorized = u'Lis\xe4tyn t\xe4ytyy olla valtuutettu'
leaAdderNotFriend = u'Lis\xe4\xe4j\xe4n t\xe4ytyy olla yst\xe4v\xe4'
leaUnknown = u'Tuntematon'
leaUnsubscribe = u'Ei tilaaja'
leaUserIncapable = u'K\xe4ytt\xe4j\xe4 esteellinen'
leaUserNotFound = u'K\xe4ytt\xe4j\xe4\xe4 ei l\xf6ytynyt'
olsAway = u'Poistunut'
olsDoNotDisturb = u'\xc4l\xe4 h\xe4iritse'
olsNotAvailable = u'Ei saatavilla'
olsOffline = u'Offline-tila'
olsOnline = u'Online-tila'
olsSkypeMe = u'Soita minulle'
olsSkypeOut = u'SkypeOut'
olsUnknown = u'Tuntematon'
smsMessageStatusComposing = u'Composing'
smsMessageStatusDelivered = u'Delivered'
smsMessageStatusFailed = u'Failed'
smsMessageStatusRead = u'Read'
smsMessageStatusReceived = u'Received'
smsMessageStatusSendingToServer = u'Sending to Server'
smsMessageStatusSentToServer = u'Sent to Server'
smsMessageStatusSomeTargetsFailed = u'Some Targets Failed'
smsMessageStatusUnknown = u'Unknown'
smsMessageTypeCCRequest = u'Confirmation Code Request'
smsMessageTypeCCSubmit = u'Confirmation Code Submit'
smsMessageTypeIncoming = u'Incoming'
smsMessageTypeOutgoing = u'Outgoing'
smsMessageTypeUnknown = u'Unknown'
smsTargetStatusAcceptable = u'Acceptable'
smsTargetStatusAnalyzing = u'Analyzing'
smsTargetStatusDeliveryFailed = u'Delivery Failed'
smsTargetStatusDeliveryPending = u'Delivery Pending'
smsTargetStatusDeliverySuccessful = u'Delivery Successful'
smsTargetStatusNotRoutable = u'Not Routable'
smsTargetStatusUndefined = u'Undefined'
smsTargetStatusUnknown = u'Unknown'
usexFemale = u'Nainen'
usexMale = u'Mies'
usexUnknown = u'Tuntematon'
vmrConnectError = u'Yhdist\xe4misvirhe'
vmrFileReadError = u'Tiedostonlukuvirhe'
vmrFileWriteError = u'Tiedostonkirjoitusvirhe'
vmrMiscError = u'Sekal virhe'
vmrNoError = u'Ei virhett\xe4'
vmrNoPrivilege = u'Ei puhepostioikeutta'
vmrNoVoicemail = u'Tuntematon puheposti'
vmrPlaybackError = u'Toistovirhe'
vmrRecordingError = u'Tallennusvirhe'
vmrUnknown = u'Tuntematon'
vmsBlank = u'Tyhj\xe4'
vmsBuffering = u'Puskuroidaan'
vmsDeleting = u'Poistetaan'
vmsDownloading = u'Imuroidaan'
vmsFailed = u'Ep\xe4onnistui'
vmsNotDownloaded = u'Ei imuroitu'
vmsPlayed = u'Toistettu'
vmsPlaying = u'Toistetaan'
vmsRecorded = u'Tallennettu'
vmsRecording = u'Puhepostin \xe4\xe4nitys'
vmsUnknown = u'Tuntematon'
vmsUnplayed = u'Ei toistettu'
vmsUploaded = u'Ladattu'
vmsUploading = u'Ladataan'
vmtCustomGreeting = u'R\xe4\xe4t\xe4l\xf6ity tervehdys'
vmtDefaultGreeting = u'Oletustervehdys'
vmtIncoming = u'saapuva puheposti'
vmtOutgoing = u'L\xe4htev\xe4'
vmtUnknown = u'Tuntematon'
vssAvailable = u'Saatavilla'
vssNotAvailable = u'Ei saatavilla'
vssPaused = u'Tauko'
vssRejected = u'Torjuttu'
vssRunning = u'Meneill\xe4\xe4n'
vssStarting = u'Aloittaa'
vssStopping = u'Lopetetaan'
vssUnknown = u'Tuntematon'
|
clstl/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/manifest/utils.py
|
12
|
import os
from six import StringIO
blacklist = ["/", "/tools/", "/resources/", "/common/", "/conformance-checkers/", "_certs"]
def rel_path_to_url(rel_path, url_base="/"):
assert not os.path.isabs(rel_path)
if url_base[0] != "/":
url_base = "/" + url_base
if url_base[-1] != "/":
url_base += "/"
return url_base + rel_path.replace(os.sep, "/")
def is_blacklisted(url):
for item in blacklist:
if item == "/":
if "/" not in url[1:]:
return True
elif url.startswith(item):
return True
return False
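# Illustrative examples: rel_path_to_url("css/test.html") returns "/css/test.html";
# is_blacklisted("/tools/foo.py") is True (blacklisted prefix), while
# is_blacklisted("/dom/interfaces.html") is False.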
def from_os_path(path):
return path.replace(os.path.sep, "/")
def to_os_path(path):
return path.replace("/", os.path.sep)
class ContextManagerStringIO(StringIO):
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.close()
class cached_property(object):
def __init__(self, func):
self.func = func
self.__doc__ = getattr(func, "__doc__")
self.name = func.__name__
def __get__(self, obj, cls=None):
if obj is None:
return self
if self.name not in obj.__dict__:
obj.__dict__[self.name] = self.func(obj)
obj.__dict__.setdefault("__cached_properties__", set()).add(self.name)
return obj.__dict__[self.name]
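# Minimal usage sketch (illustrative, not part of the original module): the
# wrapped function runs on first attribute access and the result is cached in
# the instance __dict__, so later accesses skip recomputation.
if __name__ == "__main__":
    class _Example(object):
        @cached_property
        def answer(self):
            print("computing answer")
            return 42
    example = _Example()
    assert example.answer == 42  # prints "computing answer" once
    assert example.answer == 42  # served from the per-instance cache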
|
riklaunim/django-custom-multisite
|
refs/heads/master
|
django/contrib/staticfiles/storage.py
|
66
|
from __future__ import with_statement
import hashlib
import os
import posixpath
import re
from urllib import unquote
from urlparse import urlsplit, urlunsplit, urldefrag
from django.conf import settings
from django.core.cache import (get_cache, InvalidCacheBackendError,
cache as default_cache)
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_unicode, smart_str
from django.utils.functional import LazyObject
from django.utils.importlib import import_module
from django.contrib.staticfiles.utils import check_settings, matches_patterns
class StaticFilesStorage(FileSystemStorage):
"""
Standard file system storage for static files.
The defaults for ``location`` and ``base_url`` are
``STATIC_ROOT`` and ``STATIC_URL``.
"""
def __init__(self, location=None, base_url=None, *args, **kwargs):
if location is None:
location = settings.STATIC_ROOT
if base_url is None:
base_url = settings.STATIC_URL
check_settings(base_url)
super(StaticFilesStorage, self).__init__(location, base_url,
*args, **kwargs)
def path(self, name):
if not self.location:
raise ImproperlyConfigured("You're using the staticfiles app "
"without having set the STATIC_ROOT "
"setting to a filesystem path.")
return super(StaticFilesStorage, self).path(name)
class CachedFilesMixin(object):
patterns = (
("*.css", (
r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""",
r"""(@import\s*["']\s*(.*?)["'])""",
)),
)
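    # These regexes match references such as url("img/logo.png") and
    # @import "base.css" inside *.css files; the captured path is later
    # rewritten to the hashed file name by the converter from url_converter().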
def __init__(self, *args, **kwargs):
super(CachedFilesMixin, self).__init__(*args, **kwargs)
try:
self.cache = get_cache('staticfiles')
except InvalidCacheBackendError:
# Use the default backend
self.cache = default_cache
self._patterns = SortedDict()
for extension, patterns in self.patterns:
for pattern in patterns:
compiled = re.compile(pattern)
self._patterns.setdefault(extension, []).append(compiled)
def hashed_name(self, name, content=None):
parsed_name = urlsplit(unquote(name))
clean_name = parsed_name.path.strip()
if content is None:
if not self.exists(clean_name):
raise ValueError("The file '%s' could not be found with %r." %
(clean_name, self))
try:
content = self.open(clean_name)
except IOError:
# Handle directory paths and fragments
return name
path, filename = os.path.split(clean_name)
root, ext = os.path.splitext(filename)
# Get the MD5 hash of the file
md5 = hashlib.md5()
for chunk in content.chunks():
md5.update(chunk)
md5sum = md5.hexdigest()[:12]
hashed_name = os.path.join(path, u"%s.%s%s" %
(root, md5sum, ext))
unparsed_name = list(parsed_name)
unparsed_name[2] = hashed_name
# Special casing for a @font-face hack, like url(myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
if '?#' in name and not unparsed_name[3]:
unparsed_name[2] += '?'
return urlunsplit(unparsed_name)
def cache_key(self, name):
return u'staticfiles:%s' % hashlib.md5(smart_str(name)).hexdigest()
def url(self, name, force=False):
"""
Returns the real URL in DEBUG mode.
"""
if settings.DEBUG and not force:
hashed_name, fragment = name, ''
else:
clean_name, fragment = urldefrag(name)
if urlsplit(clean_name).path.endswith('/'): # don't hash paths
hashed_name = name
else:
cache_key = self.cache_key(name)
hashed_name = self.cache.get(cache_key)
if hashed_name is None:
hashed_name = self.hashed_name(clean_name).replace('\\', '/')
# set the cache if there was a miss
# (e.g. if cache server goes down)
self.cache.set(cache_key, hashed_name)
final_url = super(CachedFilesMixin, self).url(hashed_name)
# Special casing for a @font-face hack, like url(myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
query_fragment = '?#' in name # [sic!]
if fragment or query_fragment:
urlparts = list(urlsplit(final_url))
if fragment and not urlparts[4]:
urlparts[4] = fragment
if query_fragment and not urlparts[3]:
urlparts[2] += '?'
final_url = urlunsplit(urlparts)
return unquote(final_url)
def url_converter(self, name):
"""
Returns the custom URL converter for the given file name.
"""
def converter(matchobj):
"""
Converts the matched URL depending on the parent level (`..`)
and returns the normalized and hashed URL using the url method
of the storage.
"""
matched, url = matchobj.groups()
# Completely ignore http(s) prefixed URLs,
# fragments and data-uri URLs
if url.startswith(('#', 'http:', 'https:', 'data:')):
return matched
name_parts = name.split(os.sep)
# Using posix normpath here to remove duplicates
url = posixpath.normpath(url)
url_parts = url.split('/')
parent_level, sub_level = url.count('..'), url.count('/')
if url.startswith('/'):
sub_level -= 1
url_parts = url_parts[1:]
if parent_level or not url.startswith('/'):
start, end = parent_level + 1, parent_level
else:
if sub_level:
if sub_level == 1:
parent_level -= 1
start, end = parent_level, 1
else:
start, end = 1, sub_level - 1
joined_result = '/'.join(name_parts[:-start] + url_parts[end:])
hashed_url = self.url(unquote(joined_result), force=True)
file_name = hashed_url.split('/')[-1:]
relative_url = '/'.join(url.split('/')[:-1] + file_name)
# Return the hashed version to the file
return 'url("%s")' % unquote(relative_url)
return converter
def post_process(self, paths, dry_run=False, **options):
"""
Post process the given list of files (called from collectstatic).
Processing is actually two separate operations:
1. renaming files to include a hash of their content for cache-busting,
and copying those files to the target storage.
2. adjusting files which contain references to other files so they
refer to the cache-busting filenames.
If either of these are performed on a file, then that file is considered
post-processed.
"""
# don't even dare to process the files if we're in dry run mode
if dry_run:
return
# where to store the new paths
hashed_paths = {}
# build a list of adjustable files
matches = lambda path: matches_patterns(path, self._patterns.keys())
adjustable_paths = [path for path in paths if matches(path)]
# then sort the files by the directory level
path_level = lambda name: len(name.split(os.sep))
for name in sorted(paths.keys(), key=path_level, reverse=True):
# use the original, local file, not the copied-but-unprocessed
# file, which might be somewhere far away, like S3
storage, path = paths[name]
with storage.open(path) as original_file:
# generate the hash with the original content, even for
# adjustable files.
hashed_name = self.hashed_name(name, original_file)
# then get the original's file content..
if hasattr(original_file, 'seek'):
original_file.seek(0)
hashed_file_exists = self.exists(hashed_name)
processed = False
# ..to apply each replacement pattern to the content
if name in adjustable_paths:
content = original_file.read()
converter = self.url_converter(name)
for patterns in self._patterns.values():
for pattern in patterns:
content = pattern.sub(converter, content)
if hashed_file_exists:
self.delete(hashed_name)
# then save the processed result
content_file = ContentFile(smart_str(content))
saved_name = self._save(hashed_name, content_file)
hashed_name = force_unicode(saved_name.replace('\\', '/'))
processed = True
else:
# or handle the case in which neither processing nor
# a change to the original file happened
if not hashed_file_exists:
processed = True
saved_name = self._save(hashed_name, original_file)
hashed_name = force_unicode(saved_name.replace('\\', '/'))
# and then set the cache accordingly
hashed_paths[self.cache_key(name)] = hashed_name
yield name, hashed_name, processed
# Finally set the cache
self.cache.set_many(hashed_paths)
class CachedStaticFilesStorage(CachedFilesMixin, StaticFilesStorage):
"""
A static file system storage backend which also saves
hashed copies of the files it saves.
"""
pass
class AppStaticStorage(FileSystemStorage):
"""
A file system storage backend that takes an app module and works
for the ``static`` directory of it.
"""
prefix = None
source_dir = 'static'
def __init__(self, app, *args, **kwargs):
"""
Returns a static file storage if available in the given app.
"""
# app is the actual app module
mod = import_module(app)
mod_path = os.path.dirname(mod.__file__)
location = os.path.join(mod_path, self.source_dir)
super(AppStaticStorage, self).__init__(location, *args, **kwargs)
class ConfiguredStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class(settings.STATICFILES_STORAGE)()
staticfiles_storage = ConfiguredStorage()
|
tumbl3w33d/ansible
|
refs/heads/devel
|
test/units/modules/network/f5/test_bigip_remote_role.py
|
22
|
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_remote_role import ApiParameters
from library.modules.bigip_remote_role import ModuleParameters
from library.modules.bigip_remote_role import ModuleManager
from library.modules.bigip_remote_role import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_remote_role import ApiParameters
from ansible.modules.network.f5.bigip_remote_role import ModuleParameters
from ansible.modules.network.f5.bigip_remote_role import ModuleManager
from ansible.modules.network.f5.bigip_remote_role import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
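    # Load a fixture from the fixtures directory, parse it as JSON when
    # possible, and cache the result so repeated loads reuse the parsed data.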
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
terminal_access='none',
)
p = ModuleParameters(params=args)
assert p.terminal_access == 'disable'
def test_api_parameters(self):
args = load_fixture('load_auth_remote_role_role_info_1.json')
p = ApiParameters(params=args)
assert p.terminal_access == 'disable'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_remote_syslog(self, *args):
set_module_args(dict(
name='foo',
line_order=1000,
attribute_string='bar',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
|
bsmr-ansible/ansible-modules-extras
|
refs/heads/devel
|
cloud/cloudstack/cs_staticnat.py
|
44
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cs_staticnat
short_description: Manages static NATs on Apache CloudStack based clouds.
description:
- Create, update and remove static NATs.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
ip_address:
description:
- Public IP address the static NAT is assigned to.
required: true
vm:
description:
      - Name of the virtual machine for which the static NAT is created.
- Required if C(state=present).
required: false
default: null
vm_guest_ip:
description:
- VM guest NIC secondary IP address for the static NAT.
required: false
default: false
network:
description:
- Network the IP address is related to.
required: false
default: null
version_added: "2.2"
state:
description:
- State of the static NAT.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
domain:
description:
- Domain the static NAT is related to.
required: false
default: null
account:
description:
- Account the static NAT is related to.
required: false
default: null
project:
description:
- Name of the project the static NAT is related to.
required: false
default: null
zone:
description:
- Name of the zone in which the virtual machine is in.
- If not set, default zone is used.
required: false
default: null
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# create a static NAT: 1.2.3.4 -> web01
- local_action:
module: cs_staticnat
ip_address: 1.2.3.4
vm: web01
# remove a static NAT
- local_action:
module: cs_staticnat
ip_address: 1.2.3.4
state: absent
'''
RETURN = '''
---
id:
description: UUID of the ip_address.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
ip_address:
description: Public IP address.
returned: success
type: string
sample: 1.2.3.4
vm_name:
description: Name of the virtual machine.
returned: success
type: string
sample: web-01
vm_display_name:
description: Display name of the virtual machine.
returned: success
type: string
sample: web-01
vm_guest_ip:
description: IP of the virtual machine.
returned: success
type: string
sample: 10.101.65.152
zone:
description: Name of zone the static NAT is related to.
returned: success
type: string
sample: ch-gva-2
project:
description: Name of project the static NAT is related to.
returned: success
type: string
sample: Production
account:
description: Account the static NAT is related to.
returned: success
type: string
sample: example account
domain:
description: Domain the static NAT is related to.
returned: success
type: string
sample: example domain
'''
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackStaticNat(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackStaticNat, self).__init__(module)
self.returns = {
'virtualmachinedisplayname': 'vm_display_name',
'virtualmachinename': 'vm_name',
'ipaddress': 'ip_address',
'vmipaddress': 'vm_guest_ip',
}
def create_static_nat(self, ip_address):
self.result['changed'] = True
args = {}
args['virtualmachineid'] = self.get_vm(key='id')
args['ipaddressid'] = ip_address['id']
args['vmguestip'] = self.get_vm_guest_ip()
args['networkid'] = self.get_network(key='id')
if not self.module.check_mode:
res = self.cs.enableStaticNat(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
# reset ip address and query new values
self.ip_address = None
ip_address = self.get_ip_address()
return ip_address
def update_static_nat(self, ip_address):
args = {}
args['virtualmachineid'] = self.get_vm(key='id')
args['ipaddressid'] = ip_address['id']
args['vmguestip'] = self.get_vm_guest_ip()
# make an alias, so we can use _has_changed()
ip_address['vmguestip'] = ip_address['vmipaddress']
if self.has_changed(args, ip_address, ['vmguestip', 'virtualmachineid']):
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.disableStaticNat(ipaddressid=ip_address['id'])
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
self.poll_job(res, 'staticnat')
res = self.cs.enableStaticNat(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
# reset ip address and query new values
self.ip_address = None
ip_address = self.get_ip_address()
return ip_address
def present_static_nat(self):
ip_address = self.get_ip_address()
if not ip_address['isstaticnat']:
ip_address = self.create_static_nat(ip_address)
else:
ip_address = self.update_static_nat(ip_address)
return ip_address
def absent_static_nat(self):
ip_address = self.get_ip_address()
if ip_address['isstaticnat']:
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.disableStaticNat(ipaddressid=ip_address['id'])
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
self.poll_job(res, 'staticnat')
return ip_address
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
ip_address = dict(required=True),
vm = dict(default=None),
vm_guest_ip = dict(default=None),
network = dict(default=None),
state = dict(choices=['present', 'absent'], default='present'),
zone = dict(default=None),
domain = dict(default=None),
account = dict(default=None),
project = dict(default=None),
poll_async = dict(type='bool', default=True),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
try:
acs_static_nat = AnsibleCloudStackStaticNat(module)
state = module.params.get('state')
if state in ['absent']:
ip_address = acs_static_nat.absent_static_nat()
else:
ip_address = acs_static_nat.present_static_nat()
result = acs_static_nat.get_result(ip_address)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
abhishekgahlot/scikit-learn
|
refs/heads/master
|
examples/svm/plot_weighted_samples.py
|
69
|
"""
=====================
SVM: Weighted samples
=====================
Plot the decision function of a weighted dataset, where the size of each point
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# fit the model with the modified sample weights and, for reference, without
# sample weights
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
|
mitmproxy/mitmproxy
|
refs/heads/main
|
test/mitmproxy/addons/test_maplocal.py
|
2
|
import sys
from pathlib import Path
import pytest
from mitmproxy.addons.maplocal import MapLocal, MapLocalSpec, file_candidates
from mitmproxy.utils.spec import parse_spec
from mitmproxy.test import taddons
from mitmproxy.test import tflow
@pytest.mark.parametrize(
"url,spec,expected_candidates",
[
# trailing slashes
("https://example.com/foo", ":example.com/foo:/tmp", ["/tmp/index.html"]),
("https://example.com/foo/", ":example.com/foo:/tmp", ["/tmp/index.html"]),
("https://example.com/foo", ":example.com/foo:/tmp/", ["/tmp/index.html"]),
] + [
# simple prefixes
("http://example.com/foo/bar.jpg", ":example.com/foo:/tmp", ["/tmp/bar.jpg", "/tmp/bar.jpg/index.html"]),
("https://example.com/foo/bar.jpg", ":example.com/foo:/tmp", ["/tmp/bar.jpg", "/tmp/bar.jpg/index.html"]),
("https://example.com/foo/bar.jpg?query", ":example.com/foo:/tmp", ["/tmp/bar.jpg", "/tmp/bar.jpg/index.html"]),
("https://example.com/foo/bar/baz.jpg", ":example.com/foo:/tmp",
["/tmp/bar/baz.jpg", "/tmp/bar/baz.jpg/index.html"]),
("https://example.com/foo/bar.jpg", ":/foo/bar.jpg:/tmp", ["/tmp/index.html"]),
] + [
# URL decode and special characters
("http://example.com/foo%20bar.jpg", ":example.com:/tmp", [
"/tmp/foo bar.jpg",
"/tmp/foo bar.jpg/index.html",
"/tmp/foo_bar.jpg",
"/tmp/foo_bar.jpg/index.html"
]),
("http://example.com/fóobår.jpg", ":example.com:/tmp", [
"/tmp/fóobår.jpg",
"/tmp/fóobår.jpg/index.html",
"/tmp/f_ob_r.jpg",
"/tmp/f_ob_r.jpg/index.html"
]),
] + [
# index.html
("https://example.com/foo", ":example.com/foo:/tmp", ["/tmp/index.html"]),
("https://example.com/foo/", ":example.com/foo:/tmp", ["/tmp/index.html"]),
("https://example.com/foo/bar", ":example.com/foo:/tmp", ["/tmp/bar", "/tmp/bar/index.html"]),
("https://example.com/foo/bar/", ":example.com/foo:/tmp", ["/tmp/bar", "/tmp/bar/index.html"]),
] + [
# regex
(
"https://example/view.php?f=foo.jpg",
":example/view.php\\?f=(.+):/tmp",
["/tmp/foo.jpg", "/tmp/foo.jpg/index.html"]
), (
"https://example/results?id=1&foo=2",
":example/(results\\?id=.+):/tmp",
[
"/tmp/results?id=1&foo=2",
"/tmp/results?id=1&foo=2/index.html",
"/tmp/results_id=1_foo=2",
"/tmp/results_id=1_foo=2/index.html"
]
),
] + [
# test directory traversal detection
("https://example.com/../../../../../../etc/passwd", ":example.com:/tmp", []),
# this is slightly hacky, but werkzeug's behavior differs per system.
("https://example.com/C:\\foo.txt", ":example.com:/tmp", [] if sys.platform == "win32" else [
"/tmp/C:\\foo.txt",
"/tmp/C:\\foo.txt/index.html",
"/tmp/C__foo.txt",
"/tmp/C__foo.txt/index.html"
]),
("https://example.com//etc/passwd", ":example.com:/tmp", ["/tmp/etc/passwd", "/tmp/etc/passwd/index.html"]),
]
)
def test_file_candidates(url, spec, expected_candidates):
# we circumvent the path existence checks here to simplify testing
filt, subj, repl = parse_spec(spec)
spec = MapLocalSpec(filt, subj, Path(repl))
candidates = file_candidates(url, spec)
assert [x.as_posix() for x in candidates] == expected_candidates
class TestMapLocal:
def test_configure(self, tmpdir):
ml = MapLocal()
with taddons.context(ml) as tctx:
tctx.configure(ml, map_local=["/foo/bar/" + str(tmpdir)])
with pytest.raises(Exception, match="Invalid regular expression"):
tctx.configure(ml, map_local=["/foo/+/" + str(tmpdir)])
with pytest.raises(Exception, match="Invalid file path"):
tctx.configure(ml, map_local=["/foo/.+/three"])
def test_simple(self, tmpdir):
ml = MapLocal()
with taddons.context(ml) as tctx:
tmpfile = tmpdir.join("foo.jpg")
tmpfile.write("foo")
tctx.configure(
ml,
map_local=[
"|//example.org/images|" + str(tmpdir)
]
)
f = tflow.tflow()
f.request.url = b"https://example.org/images/foo.jpg"
ml.request(f)
assert f.response.content == b"foo"
tmpfile = tmpdir.join("images", "bar.jpg")
tmpfile.write("bar", ensure=True)
tctx.configure(
ml,
map_local=[
"|//example.org|" + str(tmpdir)
]
)
f = tflow.tflow()
f.request.url = b"https://example.org/images/bar.jpg"
ml.request(f)
assert f.response.content == b"bar"
tmpfile = tmpdir.join("foofoobar.jpg")
tmpfile.write("foofoobar", ensure=True)
tctx.configure(
ml,
map_local=[
"|example.org/foo/foo/bar.jpg|" + str(tmpfile)
]
)
f = tflow.tflow()
f.request.url = b"https://example.org/foo/foo/bar.jpg"
ml.request(f)
assert f.response.content == b"foofoobar"
@pytest.mark.asyncio
async def test_nonexistent_files(self, tmpdir, monkeypatch):
ml = MapLocal()
with taddons.context(ml) as tctx:
tctx.configure(
ml,
map_local=[
"|example.org/css|" + str(tmpdir)
]
)
f = tflow.tflow()
f.request.url = b"https://example.org/css/nonexistent"
ml.request(f)
assert f.response.status_code == 404
await tctx.master.await_log("None of the local file candidates exist")
tmpfile = tmpdir.join("foo.jpg")
tmpfile.write("foo")
tctx.configure(
ml,
map_local=[
"|//example.org/images|" + str(tmpfile)
]
)
tmpfile.remove()
monkeypatch.setattr(Path, "is_file", lambda x: True)
f = tflow.tflow()
f.request.url = b"https://example.org/images/foo.jpg"
ml.request(f)
await tctx.master.await_log("could not read file")
def test_has_reply(self, tmpdir):
ml = MapLocal()
with taddons.context(ml) as tctx:
tmpfile = tmpdir.join("foo.jpg")
tmpfile.write("foo")
tctx.configure(
ml,
map_local=[
"|//example.org/images|" + str(tmpfile)
]
)
f = tflow.tflow()
f.request.url = b"https://example.org/images/foo.jpg"
f.reply.take()
ml.request(f)
assert not f.response
|
nonZero/OpenCommunity
|
refs/heads/master
|
src/communities/south_migrations/0013_auto__add_field_community_inform_system_manager.py
|
3
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Community.inform_system_manager'
db.add_column(u'communities_community', 'inform_system_manager',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Community.inform_system_manager'
db.delete_column(u'communities_community', 'inform_system_manager')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'communities.community': {
'Meta': {'object_name': 'Community'},
'allow_links_in_emails': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'board_name': ('django.db.models.fields.CharField', [], {'default': "u'Board'", 'max_length': '200'}),
'default_quorum': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'email_invitees': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inform_system_manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'issue_ranking_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'official_identifier': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'referendum_ends_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'referendum_started': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'referendum_started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'register_missing_board_members': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'straw_voting_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'uid': ('django.db.models.fields.CharField', [], {'default': "'k62mf6lqe6h4akppn8xb6ew1'", 'unique': 'True', 'max_length': '24'}),
'upcoming_meeting_comments': ('ocd.base_models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'upcoming_meeting_guests': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'upcoming_meeting_is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'upcoming_meeting_location': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'upcoming_meeting_participants': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'+'", 'blank': 'True', 'to': u"orm['users.OCUser']"}),
'upcoming_meeting_published_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'upcoming_meeting_scheduled_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'upcoming_meeting_started': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'upcoming_meeting_summary': ('ocd.base_models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'upcoming_meeting_title': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'upcoming_meeting_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'voting_ends_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'users.ocuser': {
'Meta': {'object_name': 'OCUser'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
}
}
complete_apps = ['communities']
|
torchbox/sunburnt
|
refs/heads/master
|
sunburnt/sunburnt.py
|
1
|
from __future__ import absolute_import
import cgi
import cStringIO as StringIO
from itertools import islice
import logging
import socket, time, urllib, urlparse
import warnings
from .schema import SolrSchema, SolrError
from .search import LuceneQuery, MltSolrSearch, SolrSearch, params_from_dict
MAX_LENGTH_GET_URL = 2048
# Jetty default is 4096; Tomcat default is 8192; picking 2048 to be conservative.
class SolrConnection(object):
def __init__(self, url, http_connection, retry_timeout, max_length_get_url):
if http_connection:
self.http_connection = http_connection
else:
import httplib2
self.http_connection = httplib2.Http()
self.url = url.rstrip("/") + "/"
self.update_url = self.url + "update/"
self.select_url = self.url + "select/"
self.mlt_url = self.url + "mlt/"
self.retry_timeout = retry_timeout
self.max_length_get_url = max_length_get_url
def request(self, *args, **kwargs):
try:
return self.http_connection.request(*args, **kwargs)
except socket.error:
if self.retry_timeout < 0:
raise
time.sleep(self.retry_timeout)
return self.http_connection.request(*args, **kwargs)
def commit(self, waitSearcher=None, expungeDeletes=None, softCommit=None):
response = self.update('<commit/>', commit=True,
waitSearcher=waitSearcher, expungeDeletes=expungeDeletes, softCommit=softCommit)
def optimize(self, waitSearcher=None, maxSegments=None):
response = self.update('<optimize/>', optimize=True,
waitSearcher=waitSearcher, maxSegments=maxSegments)
# For both commit & optimize above, we use the XML body instead
# of the URL parameter, because if we're using POST (which we
# should) then only the former works.
def rollback(self):
response = self.update("<rollback/>")
def update(self, update_doc, **kwargs):
body = update_doc
if body:
headers = {"Content-Type":"text/xml; charset=utf-8"}
else:
headers = {}
url = self.url_for_update(**kwargs)
r, c = self.request(url, method="POST", body=body,
headers=headers)
if r.status != 200:
raise SolrError(r, c)
def url_for_update(self, commit=None, commitWithin=None, softCommit=None, optimize=None, waitSearcher=None, expungeDeletes=None, maxSegments=None):
extra_params = {}
if commit is not None:
extra_params['commit'] = "true" if commit else "false"
if commitWithin is not None:
try:
extra_params['commitWithin'] = str(int(commitWithin))
except (TypeError, ValueError):
raise ValueError("commitWithin should be a number in milliseconds")
            if int(extra_params['commitWithin']) < 0:
                raise ValueError("commitWithin should be a non-negative number in milliseconds")
if softCommit is not None:
extra_params['softCommit'] = "true" if softCommit else "false"
if optimize is not None:
extra_params['optimize'] = "true" if optimize else "false"
if waitSearcher is not None:
extra_params['waitSearcher'] = "true" if waitSearcher else "false"
if expungeDeletes is not None:
extra_params['expungeDeletes'] = "true" if expungeDeletes else "false"
if maxSegments is not None:
try:
extra_params['maxSegments'] = str(int(maxSegments))
except (TypeError, ValueError):
raise ValueError("maxSegments")
if extra_params['maxSegments'] <= 0:
raise ValueError("maxSegments should be a positive number")
if 'expungeDeletes' in extra_params and 'commit' not in extra_params:
raise ValueError("Can't do expungeDeletes without commit")
if 'maxSegments' in extra_params and 'optimize' not in extra_params:
raise ValueError("Can't do maxSegments without optimize")
if extra_params:
return "%s?%s" % (self.update_url, urllib.urlencode(sorted(extra_params.items())))
else:
return self.update_url
def select(self, params):
qs = urllib.urlencode(params)
url = "%s?%s" % (self.select_url, qs)
if len(url) > self.max_length_get_url:
warnings.warn("Long query URL encountered - POSTing instead of "
"GETting. This query will not be cached at the HTTP layer")
url = self.select_url
kwargs = dict(
method="POST",
body=qs,
headers={"Content-Type": "application/x-www-form-urlencoded"},
)
else:
kwargs = dict(method="GET")
r, c = self.request(url, **kwargs)
if r.status != 200:
raise SolrError(r, c)
return c
def mlt(self, params, content=None):
"""Perform a MoreLikeThis query using the content specified
There may be no content if stream.url is specified in the params.
"""
qs = urllib.urlencode(params)
base_url = "%s?%s" % (self.mlt_url, qs)
if content is None:
kwargs = {'uri': base_url, 'method': "GET"}
else:
get_url = "%s&stream.body=%s" % (base_url, urllib.quote_plus(content))
if len(get_url) <= self.max_length_get_url:
kwargs = {'uri': get_url, 'method': "GET"}
else:
kwargs = {'uri': base_url, 'method': "POST",
'body': content, 'headers': {"Content-Type": "text/plain; charset=utf-8"}}
r, c = self.request(**kwargs)
if r.status != 200:
raise SolrError(r, c)
return c
class SolrInterface(object):
readable = True
writeable = True
remote_schema_file = "admin/file/?file=schema.xml"
def __init__(self, url, schemadoc=None, http_connection=None, mode='', retry_timeout=-1, max_length_get_url=MAX_LENGTH_GET_URL):
self.conn = SolrConnection(url, http_connection, retry_timeout, max_length_get_url)
self.schemadoc = schemadoc
if mode == 'r':
self.writeable = False
elif mode == 'w':
self.readable = False
self.init_schema()
def init_schema(self):
if self.schemadoc:
schemadoc = self.schemadoc
else:
r, c = self.conn.request(
urlparse.urljoin(self.conn.url, self.remote_schema_file))
if r.status != 200:
raise EnvironmentError("Couldn't retrieve schema document from server - received status code %s\n%s" % (r.status, c))
schemadoc = StringIO.StringIO(c)
self.schema = SolrSchema(schemadoc)
def add(self, docs, chunk=100, **kwargs):
if not self.writeable:
raise TypeError("This Solr instance is only for reading")
if hasattr(docs, "items") or not hasattr(docs, "__iter__"):
docs = [docs]
# to avoid making messages too large, we break the message every
# chunk docs.
for doc_chunk in grouper(docs, chunk):
update_message = self.schema.make_update(doc_chunk)
self.conn.update(str(update_message), **kwargs)
def delete(self, docs=None, queries=None, **kwargs):
if not self.writeable:
raise TypeError("This Solr instance is only for reading")
if not docs and not queries:
raise SolrError("No docs or query specified for deletion")
elif docs is not None and (hasattr(docs, "items") or not hasattr(docs, "__iter__")):
docs = [docs]
delete_message = self.schema.make_delete(docs, queries)
self.conn.update(str(delete_message), **kwargs)
def commit(self, *args, **kwargs):
if not self.writeable:
raise TypeError("This Solr instance is only for reading")
self.conn.commit(*args, **kwargs)
def optimize(self, *args, **kwargs):
if not self.writeable:
raise TypeError("This Solr instance is only for reading")
self.conn.optimize(*args, **kwargs)
def rollback(self):
if not self.writeable:
raise TypeError("This Solr instance is only for reading")
self.conn.rollback()
def delete_all(self):
if not self.writeable:
raise TypeError("This Solr instance is only for reading")
# When deletion is fixed to escape query strings, this will need fixed.
self.delete(queries=self.Q(**{"*":"*"}))
def search(self, **kwargs):
if not self.readable:
raise TypeError("This Solr instance is only for writing")
params = params_from_dict(**kwargs)
return self.schema.parse_response(self.conn.select(params))
def query(self, *args, **kwargs):
if not self.readable:
raise TypeError("This Solr instance is only for writing")
q = SolrSearch(self)
if len(args) + len(kwargs) > 0:
return q.query(*args, **kwargs)
else:
return q
def mlt_search(self, content=None, **kwargs):
if not self.readable:
raise TypeError("This Solr instance is only for writing")
params = params_from_dict(**kwargs)
return self.schema.parse_response(self.conn.mlt(params, content=content))
def mlt_query(self, fields=None, content=None, content_charset=None, url=None, query_fields=None,
**kwargs):
"""Perform a similarity query on MoreLikeThisHandler
The MoreLikeThisHandler is expected to be registered at the '/mlt'
endpoint in the solrconfig.xml file of the server.
fields is the list of field names to compute similarity upon. If not
provided, we just use the default search field.
query_fields can be used to adjust boosting values on a subset of those
fields.
Other MoreLikeThis specific parameters can be passed as kwargs without
the 'mlt.' prefix.
"""
if not self.readable:
raise TypeError("This Solr instance is only for writing")
q = MltSolrSearch(self, content=content, content_charset=content_charset, url=url)
return q.mlt(fields=fields, query_fields=query_fields, **kwargs)
def Q(self, *args, **kwargs):
q = LuceneQuery(self.schema)
q.add(args, kwargs)
return q
def grouper(iterable, n):
"grouper('ABCDEFG', 3) --> [['ABC'], ['DEF'], ['G']]"
i = iter(iterable)
g = list(islice(i, 0, n))
while g:
yield g
g = list(islice(i, 0, n))
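# Hedged usage sketch (not part of the library): a typical read/write session
# with the interface defined above might look like the block below. The URL
# and document fields are illustrative assumptions, and a running Solr server
# is required for any of it to succeed.
if __name__ == '__main__':
    si = SolrInterface("http://localhost:8983/solr/")
    si.add({"id": "doc-1", "title": "hello"})
    si.commit()
    for result in si.query(title="hello").execute():
        print(result)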
|
karlp/pyusb
|
refs/heads/master
|
tests/test_util.py
|
8
|
# Copyright (C) 2009-2014 Wander Lairson Costa
#
# The following terms apply to all files associated
# with the software unless explicitly disclaimed in individual files.
#
# The authors hereby grant permission to use, copy, modify, distribute,
# and license this software and its documentation for any purpose, provided
# that existing copyright notices are retained in all copies and that this
# notice is included verbatim in any distributions. No written agreement,
# license, or royalty fee is required for any of the authorized uses.
# Modifications to this software may be copyrighted by their authors
# and need not follow the licensing terms described here, provided that
# the new terms are clearly indicated on the first page of each file where
# they apply.
#
# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
# MODIFICATIONS.
import utils
import unittest
from usb.util import *
from devinfo import *
from usb._debug import methodtrace
import usb.backend
class _ConfigurationDescriptor(object):
def __init__(self, bConfigurationValue):
self.bLength = 9
self.bDescriptorType = DESC_TYPE_CONFIG
self.wTotalLength = 18
self.bNumInterfaces = 0
self.bConfigurationValue = bConfigurationValue
self.iConfiguration = 0
self.bmAttributes = 0xc0
self.bMaxPower = 50
class _DeviceDescriptor(object):
def __init__(self):
self.configurations = (_ConfigurationDescriptor(1), _ConfigurationDescriptor(2))
self.bLength = 18
self.bDescriptorType = usb.util.DESC_TYPE_DEVICE
self.bcdUSB = 0x0200
self.idVendor = ID_VENDOR
self.idProduct = ID_PRODUCT
self.bcdDevice = 0x0001
self.iManufacturer = 0
self.iProduct = 0
self.iSerialNumber = 0
self.bNumConfigurations = len(self.configurations)
self.bMaxPacketSize0 = 64
self.bDeviceClass = 0xff
self.bDeviceSubClass = 0xff
self.bDeviceProtocol = 0xff
class FindDescriptorTest(unittest.TestCase):
@methodtrace(utils.logger)
def runTest(self):
d = usb.core.find(idVendor=ID_VENDOR)
if d is None:
return
self.assertEqual(find_descriptor(d, bConfigurationValue=10), None)
self.assertNotEqual(find_descriptor(d, bConfigurationValue=1), None)
self.assertEqual(len(list(find_descriptor(d, find_all=True, bConfigurationValue=10))), 0)
self.assertEqual(len(list(find_descriptor(d, find_all=True, bConfigurationValue=1))), 1)
self.assertEqual(len(list(find_descriptor(d, find_all=True))), d.bNumConfigurations)
self.assertEqual(find_descriptor(d, custom_match = lambda c: c.bConfigurationValue == 10), None)
self.assertNotEqual(find_descriptor(d, custom_match = lambda c: c.bConfigurationValue == 1), None)
self.assertEqual(len(list(find_descriptor(d, find_all=True, custom_match = lambda c: c.bConfigurationValue == 10))), 0)
self.assertEqual(len(list(find_descriptor(d, find_all=True, custom_match = lambda c: c.bConfigurationValue == 1))), 1)
self.assertEqual(find_descriptor(d, custom_match = lambda c: c.bConfigurationValue == 10, bLength=9), None)
self.assertNotEqual(find_descriptor(d, custom_match = lambda c: c.bConfigurationValue == 1, bLength=9), None)
cfg = find_descriptor(d)
self.assertTrue(isinstance(cfg, usb.core.Configuration))
intf = find_descriptor(cfg)
self.assertTrue(isinstance(intf, usb.core.Interface))
class UtilTest(unittest.TestCase):
@methodtrace(utils.logger)
def test_endpoint_address(self):
self.assertEqual(endpoint_address(0x01), 0x01)
self.assertEqual(endpoint_address(0x81), 0x01)
@methodtrace(utils.logger)
def test_endpoint_direction(self):
self.assertEqual(endpoint_direction(0x01), ENDPOINT_OUT)
self.assertEqual(endpoint_direction(0x81), ENDPOINT_IN)
@methodtrace(utils.logger)
def test_endpoint_type(self):
self.assertEqual(endpoint_type(ENDPOINT_TYPE_CTRL), ENDPOINT_TYPE_CTRL)
self.assertEqual(endpoint_type(ENDPOINT_TYPE_ISO), ENDPOINT_TYPE_ISO)
self.assertEqual(endpoint_type(ENDPOINT_TYPE_INTR), ENDPOINT_TYPE_INTR)
self.assertEqual(endpoint_type(ENDPOINT_TYPE_BULK), ENDPOINT_TYPE_BULK)
@methodtrace(utils.logger)
def test_ctrl_direction(self):
self.assertEqual(ctrl_direction(CTRL_OUT), CTRL_OUT)
self.assertEqual(ctrl_direction(CTRL_IN), CTRL_IN)
@methodtrace(utils.logger)
def test_build_request_type(self):
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_STANDARD, CTRL_RECIPIENT_DEVICE), 0x00)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_STANDARD, CTRL_RECIPIENT_INTERFACE), 0x01)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_STANDARD, CTRL_RECIPIENT_ENDPOINT), 0x02)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_STANDARD, CTRL_RECIPIENT_OTHER), 0x03)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_CLASS, CTRL_RECIPIENT_DEVICE), 0x20)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_CLASS, CTRL_RECIPIENT_INTERFACE), 0x21)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_CLASS, CTRL_RECIPIENT_ENDPOINT), 0x22)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_CLASS, CTRL_RECIPIENT_OTHER), 0x23)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_VENDOR, CTRL_RECIPIENT_DEVICE), 0x40)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_VENDOR, CTRL_RECIPIENT_INTERFACE), 0x41)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_VENDOR, CTRL_RECIPIENT_ENDPOINT), 0x42)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_VENDOR, CTRL_RECIPIENT_OTHER), 0x43)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_RESERVED, CTRL_RECIPIENT_DEVICE), 0x60)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_RESERVED, CTRL_RECIPIENT_INTERFACE), 0x61)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_RESERVED, CTRL_RECIPIENT_ENDPOINT), 0x62)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_RESERVED, CTRL_RECIPIENT_OTHER), 0x63)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_STANDARD, CTRL_RECIPIENT_DEVICE), 0x80)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_STANDARD, CTRL_RECIPIENT_INTERFACE), 0x81)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_STANDARD, CTRL_RECIPIENT_ENDPOINT), 0x82)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_STANDARD, CTRL_RECIPIENT_OTHER), 0x83)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_CLASS, CTRL_RECIPIENT_DEVICE), 0xa0)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_CLASS, CTRL_RECIPIENT_INTERFACE), 0xa1)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_CLASS, CTRL_RECIPIENT_ENDPOINT), 0xa2)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_CLASS, CTRL_RECIPIENT_OTHER), 0xa3)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_VENDOR, CTRL_RECIPIENT_DEVICE), 0xc0)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_VENDOR, CTRL_RECIPIENT_INTERFACE), 0xc1)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_VENDOR, CTRL_RECIPIENT_ENDPOINT), 0xc2)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_VENDOR, CTRL_RECIPIENT_OTHER), 0xc3)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_RESERVED, CTRL_RECIPIENT_DEVICE), 0xe0)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_RESERVED, CTRL_RECIPIENT_INTERFACE), 0xe1)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_RESERVED, CTRL_RECIPIENT_ENDPOINT), 0xe2)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_RESERVED, CTRL_RECIPIENT_OTHER), 0xe3)
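# Hedged illustration (not part of the original tests): build_request_type()
# produces the bmRequestType byte passed to control transfers; the value below
# (0xc0) matches the IN / vendor / device-recipient case asserted above.
EXAMPLE_BM_REQUEST_TYPE = build_request_type(CTRL_IN, CTRL_TYPE_VENDOR,
                                             CTRL_RECIPIENT_DEVICE)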
def get_suite():
suite = unittest.TestSuite()
suite.addTest(FindDescriptorTest())
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(UtilTest))
return suite
if __name__ == '__main__':
utils.run_tests(get_suite())
|
jealone/shadowsocks
|
refs/heads/master
|
shadowsocks/daemon.py
|
386
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import logging
import signal
import time
from shadowsocks import common, shell
# this module is ported from ShadowVPN daemon.c
def daemon_exec(config):
if 'daemon' in config:
if os.name != 'posix':
raise Exception('daemon mode is only supported on Unix')
command = config['daemon']
if not command:
command = 'start'
pid_file = config['pid-file']
log_file = config['log-file']
if command == 'start':
daemon_start(pid_file, log_file)
elif command == 'stop':
daemon_stop(pid_file)
# always exit after daemon_stop
sys.exit(0)
elif command == 'restart':
daemon_stop(pid_file)
daemon_start(pid_file, log_file)
else:
raise Exception('unsupported daemon command %s' % command)
def write_pid_file(pid_file, pid):
import fcntl
import stat
try:
fd = os.open(pid_file, os.O_RDWR | os.O_CREAT,
stat.S_IRUSR | stat.S_IWUSR)
except OSError as e:
shell.print_exception(e)
return -1
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
assert flags != -1
flags |= fcntl.FD_CLOEXEC
r = fcntl.fcntl(fd, fcntl.F_SETFD, flags)
assert r != -1
# There is no platform independent way to implement fcntl(fd, F_SETLK, &fl)
# via fcntl.fcntl. So use lockf instead
try:
fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB, 0, 0, os.SEEK_SET)
except IOError:
r = os.read(fd, 32)
if r:
logging.error('already started at pid %s' % common.to_str(r))
else:
logging.error('already started')
os.close(fd)
return -1
os.ftruncate(fd, 0)
os.write(fd, common.to_bytes(str(pid)))
return 0
def freopen(f, mode, stream):
oldf = open(f, mode)
oldfd = oldf.fileno()
newfd = stream.fileno()
os.close(newfd)
os.dup2(oldfd, newfd)
def daemon_start(pid_file, log_file):
def handle_exit(signum, _):
if signum == signal.SIGTERM:
sys.exit(0)
sys.exit(1)
signal.signal(signal.SIGINT, handle_exit)
signal.signal(signal.SIGTERM, handle_exit)
# fork only once because we are sure parent will exit
pid = os.fork()
assert pid != -1
if pid > 0:
# parent waits for its child
time.sleep(5)
sys.exit(0)
# child signals its parent to exit
ppid = os.getppid()
pid = os.getpid()
if write_pid_file(pid_file, pid) != 0:
os.kill(ppid, signal.SIGINT)
sys.exit(1)
os.setsid()
    signal.signal(signal.SIGHUP, signal.SIG_IGN)
print('started')
os.kill(ppid, signal.SIGTERM)
sys.stdin.close()
try:
freopen(log_file, 'a', sys.stdout)
freopen(log_file, 'a', sys.stderr)
except IOError as e:
shell.print_exception(e)
sys.exit(1)
def daemon_stop(pid_file):
import errno
try:
with open(pid_file) as f:
buf = f.read()
pid = common.to_str(buf)
            if not buf:
                logging.error('not running')
                return
except IOError as e:
shell.print_exception(e)
if e.errno == errno.ENOENT:
# always exit 0 if we are sure daemon is not running
logging.error('not running')
return
sys.exit(1)
pid = int(pid)
if pid > 0:
try:
os.kill(pid, signal.SIGTERM)
except OSError as e:
if e.errno == errno.ESRCH:
logging.error('not running')
# always exit 0 if we are sure daemon is not running
return
shell.print_exception(e)
sys.exit(1)
else:
logging.error('pid is not positive: %d', pid)
# sleep for maximum 10s
for i in range(0, 200):
try:
# query for the pid
os.kill(pid, 0)
except OSError as e:
if e.errno == errno.ESRCH:
break
time.sleep(0.05)
else:
logging.error('timed out when stopping pid %d', pid)
sys.exit(1)
print('stopped')
os.unlink(pid_file)
def set_user(username):
if username is None:
return
import pwd
import grp
try:
pwrec = pwd.getpwnam(username)
except KeyError:
logging.error('user not found: %s' % username)
raise
user = pwrec[0]
uid = pwrec[2]
gid = pwrec[3]
cur_uid = os.getuid()
if uid == cur_uid:
return
if cur_uid != 0:
logging.error('can not set user as nonroot user')
# will raise later
# inspired by supervisor
if hasattr(os, 'setgroups'):
groups = [grprec[2] for grprec in grp.getgrall() if user in grprec[3]]
groups.insert(0, gid)
os.setgroups(groups)
os.setgid(gid)
os.setuid(uid)
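# Hedged illustration (not part of the original module): daemon_exec() expects
# a config dict with the keys shown below; the paths and command value are
# illustrative assumptions only.
_EXAMPLE_DAEMON_CONFIG = {
    'daemon': 'start',                        # 'start', 'stop' or 'restart'
    'pid-file': '/var/run/shadowsocks.pid',
    'log-file': '/var/log/shadowsocks.log',
}
# daemon_exec(_EXAMPLE_DAEMON_CONFIG)  # would fork and daemonize on POSIX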
|
williamfeng323/py-web
|
refs/heads/master
|
flask/lib/python3.6/site-packages/sqlalchemy/dialects/oracle/zxjdbc.py
|
33
|
# oracle/zxjdbc.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: oracle+zxjdbc
:name: zxJDBC for Jython
:dbapi: zxjdbc
:connectstring: oracle+zxjdbc://user:pass@host/dbname
:driverurl: http://www.oracle.com/technetwork/database/features/jdbc/index-091264.html
.. note:: Jython is not supported by current versions of SQLAlchemy. The
zxjdbc dialect should be considered as experimental.
"""
import decimal
import re
from sqlalchemy import sql, types as sqltypes, util
from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector
from sqlalchemy.dialects.oracle.base import (OracleCompiler,
OracleDialect,
OracleExecutionContext)
from sqlalchemy.engine import result as _result
from sqlalchemy.sql import expression
import collections
SQLException = zxJDBC = None
class _ZxJDBCDate(sqltypes.Date):
def result_processor(self, dialect, coltype):
def process(value):
if value is None:
return None
else:
return value.date()
return process
class _ZxJDBCNumeric(sqltypes.Numeric):
def result_processor(self, dialect, coltype):
# XXX: does the dialect return Decimal or not???
# if it does (in all cases), we could use a None processor as well as
# the to_float generic processor
if self.asdecimal:
def process(value):
if isinstance(value, decimal.Decimal):
return value
else:
return decimal.Decimal(str(value))
else:
def process(value):
if isinstance(value, decimal.Decimal):
return float(value)
else:
return value
return process
class OracleCompiler_zxjdbc(OracleCompiler):
def returning_clause(self, stmt, returning_cols):
self.returning_cols = list(
expression._select_iterables(returning_cols))
# within_columns_clause=False so that labels (foo AS bar) don't render
columns = [self.process(c, within_columns_clause=False)
for c in self.returning_cols]
if not hasattr(self, 'returning_parameters'):
self.returning_parameters = []
binds = []
for i, col in enumerate(self.returning_cols):
dbtype = col.type.dialect_impl(
self.dialect).get_dbapi_type(self.dialect.dbapi)
self.returning_parameters.append((i + 1, dbtype))
bindparam = sql.bindparam(
"ret_%d" % i, value=ReturningParam(dbtype))
self.binds[bindparam.key] = bindparam
binds.append(
self.bindparam_string(self._truncate_bindparam(bindparam)))
return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds)
class OracleExecutionContext_zxjdbc(OracleExecutionContext):
def pre_exec(self):
if hasattr(self.compiled, 'returning_parameters'):
# prepare a zxJDBC statement so we can grab its underlying
# OraclePreparedStatement's getReturnResultSet later
self.statement = self.cursor.prepare(self.statement)
def get_result_proxy(self):
if hasattr(self.compiled, 'returning_parameters'):
rrs = None
try:
try:
rrs = self.statement.__statement__.getReturnResultSet()
next(rrs)
except SQLException as sqle:
msg = '%s [SQLCode: %d]' % (
sqle.getMessage(), sqle.getErrorCode())
if sqle.getSQLState() is not None:
msg += ' [SQLState: %s]' % sqle.getSQLState()
raise zxJDBC.Error(msg)
else:
row = tuple(
self.cursor.datahandler.getPyObject(
rrs, index, dbtype)
for index, dbtype in
self.compiled.returning_parameters)
return ReturningResultProxy(self, row)
finally:
if rrs is not None:
try:
rrs.close()
except SQLException:
pass
self.statement.close()
return _result.ResultProxy(self)
def create_cursor(self):
cursor = self._dbapi_connection.cursor()
cursor.datahandler = self.dialect.DataHandler(cursor.datahandler)
return cursor
class ReturningResultProxy(_result.FullyBufferedResultProxy):
"""ResultProxy backed by the RETURNING ResultSet results."""
def __init__(self, context, returning_row):
self._returning_row = returning_row
super(ReturningResultProxy, self).__init__(context)
def _cursor_description(self):
ret = []
for c in self.context.compiled.returning_cols:
if hasattr(c, 'name'):
ret.append((c.name, c.type))
else:
ret.append((c.anon_label, c.type))
return ret
def _buffer_rows(self):
return collections.deque([self._returning_row])
class ReturningParam(object):
"""A bindparam value representing a RETURNING parameter.
Specially handled by OracleReturningDataHandler.
"""
def __init__(self, type):
self.type = type
def __eq__(self, other):
if isinstance(other, ReturningParam):
return self.type == other.type
return NotImplemented
def __ne__(self, other):
if isinstance(other, ReturningParam):
return self.type != other.type
return NotImplemented
def __repr__(self):
kls = self.__class__
return '<%s.%s object at 0x%x type=%s>' % (
kls.__module__, kls.__name__, id(self), self.type)
class OracleDialect_zxjdbc(ZxJDBCConnector, OracleDialect):
jdbc_db_name = 'oracle'
jdbc_driver_name = 'oracle.jdbc.OracleDriver'
statement_compiler = OracleCompiler_zxjdbc
execution_ctx_cls = OracleExecutionContext_zxjdbc
colspecs = util.update_copy(
OracleDialect.colspecs,
{
sqltypes.Date: _ZxJDBCDate,
sqltypes.Numeric: _ZxJDBCNumeric
}
)
def __init__(self, *args, **kwargs):
super(OracleDialect_zxjdbc, self).__init__(*args, **kwargs)
global SQLException, zxJDBC
from java.sql import SQLException
from com.ziclix.python.sql import zxJDBC
from com.ziclix.python.sql.handler import OracleDataHandler
class OracleReturningDataHandler(OracleDataHandler):
"""zxJDBC DataHandler that specially handles ReturningParam."""
def setJDBCObject(self, statement, index, object, dbtype=None):
if type(object) is ReturningParam:
statement.registerReturnParameter(index, object.type)
elif dbtype is None:
OracleDataHandler.setJDBCObject(
self, statement, index, object)
else:
OracleDataHandler.setJDBCObject(
self, statement, index, object, dbtype)
self.DataHandler = OracleReturningDataHandler
def initialize(self, connection):
super(OracleDialect_zxjdbc, self).initialize(connection)
self.implicit_returning = \
connection.connection.driverversion >= '10.2'
def _create_jdbc_url(self, url):
return 'jdbc:oracle:thin:@%s:%s:%s' % (
url.host, url.port or 1521, url.database)
def _get_server_version_info(self, connection):
version = re.search(
r'Release ([\d\.]+)', connection.connection.dbversion).group(1)
return tuple(int(x) for x in version.split('.'))
dialect = OracleDialect_zxjdbc
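# Hedged usage sketch (not part of the original module): the dialect above is
# normally selected through a connection URL of the form documented in the
# module docstring. Host, SID and credentials below are illustrative
# assumptions, and this only works under Jython with the Oracle JDBC driver
# on the CLASSPATH.
if __name__ == '__main__':
    from sqlalchemy import create_engine
    engine = create_engine("oracle+zxjdbc://scott:tiger@localhost/XE")
    with engine.connect() as conn:
        print(conn.scalar("select 1 from dual"))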
|
jeanlinux/calibre
|
refs/heads/master
|
src/cherrypy/lib/static.py
|
83
|
try:
from io import UnsupportedOperation
except ImportError:
UnsupportedOperation = object()
import logging
import mimetypes
mimetypes.init()
mimetypes.types_map['.dwg']='image/x-dwg'
mimetypes.types_map['.ico']='image/x-icon'
mimetypes.types_map['.bz2']='application/x-bzip2'
mimetypes.types_map['.gz']='application/x-gzip'
import os
import re
import stat
import time
import cherrypy
from cherrypy._cpcompat import ntob, unquote
from cherrypy.lib import cptools, httputil, file_generator_limited
def serve_file(path, content_type=None, disposition=None, name=None, debug=False):
"""Set status, headers, and body in order to serve the given path.
The Content-Type header will be set to the content_type arg, if provided.
If not provided, the Content-Type will be guessed by the file extension
of the 'path' argument.
If disposition is not None, the Content-Disposition header will be set
to "<disposition>; filename=<name>". If name is None, it will be set
to the basename of path. If disposition is None, no Content-Disposition
header will be written.
"""
response = cherrypy.serving.response
# If path is relative, users should fix it by making path absolute.
# That is, CherryPy should not guess where the application root is.
# It certainly should *not* use cwd (since CP may be invoked from a
# variety of paths). If using tools.staticdir, you can make your relative
# paths become absolute by supplying a value for "tools.staticdir.root".
if not os.path.isabs(path):
msg = "'%s' is not an absolute path." % path
if debug:
cherrypy.log(msg, 'TOOLS.STATICFILE')
raise ValueError(msg)
try:
st = os.stat(path)
except OSError:
if debug:
cherrypy.log('os.stat(%r) failed' % path, 'TOOLS.STATIC')
raise cherrypy.NotFound()
# Check if path is a directory.
if stat.S_ISDIR(st.st_mode):
# Let the caller deal with it as they like.
if debug:
cherrypy.log('%r is a directory' % path, 'TOOLS.STATIC')
raise cherrypy.NotFound()
# Set the Last-Modified response header, so that
# modified-since validation code can work.
response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
cptools.validate_since()
if content_type is None:
# Set content-type based on filename extension
ext = ""
i = path.rfind('.')
if i != -1:
ext = path[i:].lower()
content_type = mimetypes.types_map.get(ext, None)
if content_type is not None:
response.headers['Content-Type'] = content_type
if debug:
cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC')
cd = None
if disposition is not None:
if name is None:
name = os.path.basename(path)
cd = '%s; filename="%s"' % (disposition, name)
response.headers["Content-Disposition"] = cd
if debug:
cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC')
# Set Content-Length and use an iterable (file object)
# this way CP won't load the whole file in memory
content_length = st.st_size
fileobj = open(path, 'rb')
return _serve_fileobj(fileobj, content_type, content_length, debug=debug)
def serve_fileobj(fileobj, content_type=None, disposition=None, name=None,
debug=False):
"""Set status, headers, and body in order to serve the given file object.
The Content-Type header will be set to the content_type arg, if provided.
If disposition is not None, the Content-Disposition header will be set
to "<disposition>; filename=<name>". If name is None, 'filename' will
not be set. If disposition is None, no Content-Disposition header will
be written.
CAUTION: If the request contains a 'Range' header, one or more seek()s will
be performed on the file object. This may cause undesired behavior if
the file object is not seekable. It could also produce undesired results
if the caller set the read position of the file object prior to calling
serve_fileobj(), expecting that the data would be served starting from that
position.
"""
response = cherrypy.serving.response
try:
st = os.fstat(fileobj.fileno())
except AttributeError:
if debug:
cherrypy.log('os has no fstat attribute', 'TOOLS.STATIC')
content_length = None
except UnsupportedOperation:
content_length = None
else:
# Set the Last-Modified response header, so that
# modified-since validation code can work.
response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
cptools.validate_since()
content_length = st.st_size
if content_type is not None:
response.headers['Content-Type'] = content_type
if debug:
cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC')
cd = None
if disposition is not None:
if name is None:
cd = disposition
else:
cd = '%s; filename="%s"' % (disposition, name)
response.headers["Content-Disposition"] = cd
if debug:
cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC')
return _serve_fileobj(fileobj, content_type, content_length, debug=debug)
def _serve_fileobj(fileobj, content_type, content_length, debug=False):
"""Internal. Set response.body to the given file object, perhaps ranged."""
response = cherrypy.serving.response
# HTTP/1.0 didn't have Range/Accept-Ranges headers, or the 206 code
request = cherrypy.serving.request
if request.protocol >= (1, 1):
response.headers["Accept-Ranges"] = "bytes"
r = httputil.get_ranges(request.headers.get('Range'), content_length)
if r == []:
response.headers['Content-Range'] = "bytes */%s" % content_length
message = "Invalid Range (first-byte-pos greater than Content-Length)"
if debug:
cherrypy.log(message, 'TOOLS.STATIC')
raise cherrypy.HTTPError(416, message)
if r:
if len(r) == 1:
# Return a single-part response.
start, stop = r[0]
if stop > content_length:
stop = content_length
r_len = stop - start
if debug:
cherrypy.log('Single part; start: %r, stop: %r' % (start, stop),
'TOOLS.STATIC')
response.status = "206 Partial Content"
response.headers['Content-Range'] = (
"bytes %s-%s/%s" % (start, stop - 1, content_length))
response.headers['Content-Length'] = r_len
fileobj.seek(start)
response.body = file_generator_limited(fileobj, r_len)
else:
# Return a multipart/byteranges response.
response.status = "206 Partial Content"
try:
# Python 3
from email.generator import _make_boundary as choose_boundary
except ImportError:
# Python 2
from mimetools import choose_boundary
boundary = choose_boundary()
ct = "multipart/byteranges; boundary=%s" % boundary
response.headers['Content-Type'] = ct
if "Content-Length" in response.headers:
# Delete Content-Length header so finalize() recalcs it.
del response.headers["Content-Length"]
def file_ranges():
# Apache compatibility:
yield ntob("\r\n")
for start, stop in r:
if debug:
cherrypy.log('Multipart; start: %r, stop: %r' % (start, stop),
'TOOLS.STATIC')
yield ntob("--" + boundary, 'ascii')
yield ntob("\r\nContent-type: %s" % content_type, 'ascii')
yield ntob("\r\nContent-range: bytes %s-%s/%s\r\n\r\n"
% (start, stop - 1, content_length), 'ascii')
fileobj.seek(start)
for chunk in file_generator_limited(fileobj, stop-start):
yield chunk
yield ntob("\r\n")
# Final boundary
yield ntob("--" + boundary + "--", 'ascii')
# Apache compatibility:
yield ntob("\r\n")
response.body = file_ranges()
return response.body
else:
if debug:
cherrypy.log('No byteranges requested', 'TOOLS.STATIC')
# Set Content-Length and use an iterable (file object)
# this way CP won't load the whole file in memory
response.headers['Content-Length'] = content_length
response.body = fileobj
return response.body
def serve_download(path, name=None):
"""Serve 'path' as an application/x-download attachment."""
# This is such a common idiom I felt it deserved its own wrapper.
return serve_file(path, "application/x-download", "attachment", name)
def _attempt(filename, content_types, debug=False):
if debug:
cherrypy.log('Attempting %r (content_types %r)' %
(filename, content_types), 'TOOLS.STATICDIR')
try:
# you can set the content types for a
# complete directory per extension
content_type = None
if content_types:
r, ext = os.path.splitext(filename)
content_type = content_types.get(ext[1:], None)
serve_file(filename, content_type=content_type, debug=debug)
return True
except cherrypy.NotFound:
# If we didn't find the static file, continue handling the
# request. We might find a dynamic handler instead.
if debug:
cherrypy.log('NotFound', 'TOOLS.STATICFILE')
return False
def staticdir(section, dir, root="", match="", content_types=None, index="",
debug=False):
"""Serve a static resource from the given (root +) dir.
match
If given, request.path_info will be searched for the given
regular expression before attempting to serve static content.
content_types
If given, it should be a Python dictionary of
{file-extension: content-type} pairs, where 'file-extension' is
a string (e.g. "gif") and 'content-type' is the value to write
out in the Content-Type response header (e.g. "image/gif").
index
If provided, it should be the (relative) name of a file to
serve for directory requests. For example, if the dir argument is
'/home/me', the Request-URI is 'myapp', and the index arg is
'index.html', the file '/home/me/myapp/index.html' will be sought.
"""
request = cherrypy.serving.request
if request.method not in ('GET', 'HEAD'):
if debug:
cherrypy.log('request.method not GET or HEAD', 'TOOLS.STATICDIR')
return False
if match and not re.search(match, request.path_info):
if debug:
cherrypy.log('request.path_info %r does not match pattern %r' %
(request.path_info, match), 'TOOLS.STATICDIR')
return False
# Allow the use of '~' to refer to a user's home directory.
dir = os.path.expanduser(dir)
# If dir is relative, make absolute using "root".
if not os.path.isabs(dir):
if not root:
msg = "Static dir requires an absolute dir (or root)."
if debug:
cherrypy.log(msg, 'TOOLS.STATICDIR')
raise ValueError(msg)
dir = os.path.join(root, dir)
# Determine where we are in the object tree relative to 'section'
# (where the static tool was defined).
if section == 'global':
section = "/"
section = section.rstrip(r"\/")
branch = request.path_info[len(section) + 1:]
branch = unquote(branch.lstrip(r"\/"))
# If branch is "", filename will end in a slash
filename = os.path.join(dir, branch)
if debug:
cherrypy.log('Checking file %r to fulfill %r' %
(filename, request.path_info), 'TOOLS.STATICDIR')
# There's a chance that the branch pulled from the URL might
# have ".." or similar uplevel attacks in it. Check that the final
# filename is a child of dir.
if not os.path.normpath(filename).startswith(os.path.normpath(dir)):
raise cherrypy.HTTPError(403) # Forbidden
    handled = _attempt(filename, content_types, debug=debug)
if not handled:
# Check for an index file if a folder was requested.
if index:
            handled = _attempt(os.path.join(filename, index), content_types, debug=debug)
if handled:
request.is_index = filename[-1] in (r"\/")
return handled
def staticfile(filename, root=None, match="", content_types=None, debug=False):
"""Serve a static resource from the given (root +) filename.
match
If given, request.path_info will be searched for the given
regular expression before attempting to serve static content.
content_types
If given, it should be a Python dictionary of
{file-extension: content-type} pairs, where 'file-extension' is
a string (e.g. "gif") and 'content-type' is the value to write
out in the Content-Type response header (e.g. "image/gif").
"""
request = cherrypy.serving.request
if request.method not in ('GET', 'HEAD'):
if debug:
cherrypy.log('request.method not GET or HEAD', 'TOOLS.STATICFILE')
return False
if match and not re.search(match, request.path_info):
if debug:
cherrypy.log('request.path_info %r does not match pattern %r' %
(request.path_info, match), 'TOOLS.STATICFILE')
return False
# If filename is relative, make absolute using "root".
if not os.path.isabs(filename):
if not root:
msg = "Static tool requires an absolute filename (got '%s')." % filename
if debug:
cherrypy.log(msg, 'TOOLS.STATICFILE')
raise ValueError(msg)
filename = os.path.join(root, filename)
return _attempt(filename, content_types, debug=debug)
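# Hedged configuration sketch (not part of the original module): these helpers
# are normally enabled through CherryPy's tool configuration rather than
# called directly. The paths below are illustrative assumptions.
_EXAMPLE_STATIC_CONF = {
    '/static': {
        'tools.staticdir.on': True,
        'tools.staticdir.root': '/home/me/myapp',
        'tools.staticdir.dir': 'static',
        'tools.staticdir.index': 'index.html',
    },
}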
|
rytaft/h-store
|
refs/heads/master
|
tools/hstore/fabric/abstractfabric.py
|
9
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------
# Copyright (C) 2013 by H-Store Project
# Brown University
# Massachusetts Institute of Technology
# Yale University
#
# http://hstore.cs.brown.edu/
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# -----------------------------------------------------------------------
from __future__ import with_statement
import os
import sys
import re
import math
import time
import logging
import paramiko
import string
from datetime import datetime
from StringIO import StringIO
from pprint import pformat
## H-Store Third-Party Libraries
realpath = os.path.realpath(__file__)
basedir = os.path.dirname(realpath)
if not os.path.exists(realpath):
cwd = os.getcwd()
basename = os.path.basename(realpath)
if os.path.exists(os.path.join(cwd, basename)):
basedir = cwd
sys.path.append(os.path.realpath(os.path.join(basedir, "../../third_party/python")))
from fabric.api import *
from fabric.contrib.files import *
## =====================================================================
## LOGGING CONFIGURATION
## =====================================================================
LOG = logging.getLogger(__name__)
LOG_handler = logging.StreamHandler()
LOG_formatter = logging.Formatter(fmt='%(asctime)s [%(funcName)s:%(lineno)03d] %(levelname)-5s: %(message)s',
datefmt='%m-%d-%Y %H:%M:%S')
LOG_handler.setFormatter(LOG_formatter)
LOG.addHandler(LOG_handler)
LOG.setLevel(logging.INFO)
## =====================================================================
## DEPLOYMENT CONFIGURATION
## =====================================================================
ENV_DEFAULT = {
# Fabric Options
"key_filename": os.path.join(os.environ["HOME"], ".ssh/hstore.pem"),
"user": os.environ["USER"],
"disable_known_hosts": True,
"no_agent": True,
"port": 22,
# Client Options
"client.count": 1,
"client.threads_per_host": 500,
# H-Store Options
"hstore.basedir": None,
"hstore.git": "git://github.com/apavlo/h-store.git",
"hstore.git_branch": "master",
"hstore.git_options": "",
"hstore.clean": False,
"hstore.exec_prefix": "",
"hstore.partitions": 6,
"hstore.sites_per_host": 1,
"hstore.partitions_per_site": 8,
"hstore.round_robin_partitions": True,
}
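# Hedged illustration (not part of the original file): concrete fabric
# subclasses layer deployment-specific overrides on top of ENV_DEFAULT,
# typically by passing a dict like the one below as the envUpdates
# constructor argument. All values here are assumptions for illustration only.
EXAMPLE_ENV_OVERRIDES = {
    "hstore.partitions": 16,
    "hstore.sites_per_host": 2,
    "client.count": 4,
}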
## =====================================================================
## AbstractFabric
## =====================================================================
class AbstractFabric(object):
def __init__(self, env, envUpdates):
self.env = env
self.updateEnv(ENV_DEFAULT)
self.updateEnv(envUpdates)
self.hstore_dir = os.path.join(self.env["hstore.basedir"], "h-store")
LOG.debug("HSTORE DIR: %s", self.hstore_dir)
self.running_instances = [ ]
self.all_instances = [ ]
self.partitionCount = self.env["hstore.partitions"]
self.clientCount = self.env["client.count"]
if not self.env.get("hstore.num_hosts_round_robin", None) is None:
self.hostCount = int(self.env["hstore.num_hosts_round_robin"])
self.siteCount = self.hostCount
else:
self.siteCount = int(math.ceil(self.partitionCount / float(self.env["hstore.partitions_per_site"])))
self.hostCount = int(math.ceil(self.siteCount / float(self.env["hstore.sites_per_host"])))
## DEF
def updateEnv(self, envUpdates):
for k, v in envUpdates.iteritems():
self.env[k] = v
if v:
t = type(v)
LOG.debug("%s [%s] => %s" % (k, t, self.env[k]))
self.env[k] = t(self.env[k])
## FOR
## DEF
## =====================================================================
## IMPLEMENTATION API
## =====================================================================
def stop_cluster(self, **kwargs):
"""Stop all instances in the cluster"""
raise NotImplementedError("Unimplemented %s" % self.__init__.im_class)
## DEF
def __startInstances__(self, **kwargs):
raise NotImplementedError("Unimplemented %s" % self.__init__.im_class)
## DEF
def updateLog4j(self, reset=False, debug=[], trace=[]):
raise NotImplementedError("Unimplemented %s" % self.__init__.im_class)
## DEF
def sync_time(self):
raise NotImplementedError("Unimplemented %s" % self.__init__.im_class)
## DEF
def getInstance(self, public_dns_name):
raise NotImplementedError("Unimplemented %s" % self.__init__.im_class)
## DEF
def getAllInstances(self):
raise NotImplementedError("Unimplemented %s" % self.__init__.im_class)
## DEF
    def getRunningSiteInstances(self):
raise NotImplementedError("Unimplemented %s" % self.__init__.im_class)
## DEF
    def getRunningClientInstances(self):
raise NotImplementedError("Unimplemented %s" % self.__init__.im_class)
## DEF
## =====================================================================
## MAIN API
## =====================================================================
## ----------------------------------------------
## get_version
## ----------------------------------------------
def get_version(self, inst):
"""Get the current Git commit id and date in the deployment directory"""
with settings(host_string=inst.public_dns_name):
with cd(self.hstore_dir):
output = run("git log --pretty=format:' %h %at ' -n 1")
data = map(string.strip, output.split(" "))
rev_id = str(data[1])
rev_date = datetime.fromtimestamp(int(data[2]))
LOG.info("Revision: %s / %s" % (rev_id, rev_date))
return (rev_id, rev_date)
## WITH
## DEF
def get_file(self, inst, filePath):
"""Retrieve and print the file from the cluster for the given path"""
sio = StringIO()
with settings(host_string=inst.public_dns_name):
if get(filePath, local_path=sio).failed:
raise Exception("Failed to retrieve remote file '%s'" % filePath)
return sio.getvalue()
## DEF
## ---------------------------------------------------------------------
## INTERNAL API
## ---------------------------------------------------------------------
def exec_benchmark(self, inst, project, \
removals=[ ], json=False, build=True, trace=False, \
updateJar=True, updateConf=True, updateRepo=False, resetLog4j=False, \
extraParams={ } ):
## Make sure we have enough instances
if (self.hostCount + self.clientCount) > len(self.running_instances):
raise Exception("Needed %d host + %d client instances but only %d are currently running" % (\
self.hostCount, self.clientCount, len(self.running_instances)))
hosts = [ ]
clients = [ ]
host_id = 0
site_id = 0
partition_id = 0
partitions_per_site = self.env["hstore.partitions_per_site"]
## HStore Sites
site_hosts = set()
## Attempt to assign the same number of partitions to nodes
if self.env.get("hstore.round_robin_partitions", False):
sites_needed = math.ceil(self.env["hstore.partitions"] / float(partitions_per_site))
partitions_per_site = math.ceil(self.env["hstore.partitions"] / float(sites_needed))
LOG.debug("Partitions Needed: %d" % self.env["hstore.partitions"])
LOG.debug("Partitions Per Site: %d" % partitions_per_site)
LOG.debug("Sites Per Host: %d" % self.env["hstore.sites_per_host"])
for siteInst in self.getRunningSiteInstances():
site_hosts.add(siteInst.private_dns_name)
for i in range(self.env["hstore.sites_per_host"]):
firstPartition = partition_id
lastPartition = min(self.env["hstore.partitions"], firstPartition + partitions_per_site)-1
host = "%s:%d:%d" % (siteInst.private_dns_name, site_id, firstPartition)
if firstPartition != lastPartition:
host += "-%d" % lastPartition
partition_id += partitions_per_site
site_id += 1
hosts.append(host)
if lastPartition+1 == self.env["hstore.partitions"]: break
## FOR (SITES)
if lastPartition+1 == self.env["hstore.partitions"]: break
## FOR
LOG.debug("Last Partition: %d", lastPartition)
LOG.debug("Site Hosts: %s" % site_hosts)
assert len(hosts) > 0
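## Illustrative sketch (added comment, not in the original script; host names and
## partition counts are hypothetical): with 8 partitions, 4 partitions per site and
## 2 sites per host, the loop above builds host specifications of the form
## "<private_dns>:<site_id>:<first_partition>-<last_partition>", e.g.
##   ip-10-0-0-1:0:0-3
##   ip-10-0-0-1:1:4-7
## A site holding a single partition omits the "-<last_partition>" suffix.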
## HStore Clients
for clientInst in self.getRunningClientInstances():
if clientInst.private_dns_name in site_hosts: continue
clients.append(clientInst.private_dns_name)
## FOR
LOG.debug("Client Hosts: %s" % clients)
assert len(clients) > 0
## Make sure that the checkout is up to date
if updateRepo:
LOG.info("Updating H-Store Git checkout")
self.deploy_hstore(build=build, update=True)
## Update H-Store Conf file
## Do this after we update the repository so that we can put in our updates
if updateConf:
LOG.info("Updating H-Store configuration files")
self.write_conf(project, removals, revertFirst=True)
if resetLog4j:
LOG.info("Reverting log4j.properties")
self.resetLog4j()
## Construct dict of command-line H-Store options
hstore_options = {
"client.hosts": ",".join(clients),
"client.count": self.env["client.count"],
"client.threads_per_host": self.env["client.threads_per_host"],
"project": project,
"hosts": '"%s"' % ";".join(hosts),
}
if json: hstore_options["client.output_results_json"] = True
if trace:
hstore_options["trace"] = "traces/%s-%d" % (project, time.time())
LOG.debug("Enabling trace files that will be output to '%s'" % hstore_options["trace"])
LOG.debug("H-Store Config:\n" + pformat(hstore_options))
## Extra Parameters
if extraParams:
hstore_options = dict(hstore_options.items() + extraParams.items())
## Any other option not listed in the above dict should be written to
## a properties file
workloads = None
hstore_opts_cmd = " ".join(map(lambda x: "-D%s=%s" % (x, hstore_options[x]), hstore_options.keys()))
with settings(host_string=inst.public_dns_name):
with cd(self.hstore_dir):
prefix = self.env["hstore.exec_prefix"]
if updateJar:
LOG.info("Updating H-Store %s project jar file" % (project.upper()))
cmd = "ant %s hstore-prepare %s" % (prefix, hstore_opts_cmd)
run(cmd)
projectFile = os.path.join(self.hstore_dir, project+".jar")
for other in self.running_instances:
if other == inst: continue
run("scp %s %s:%s" % (projectFile, other.public_dns_name, projectFile))
## IF
LOG.info("Running benchmark on %s", inst)
cmd = "ant %s hstore-benchmark %s" % (prefix, hstore_opts_cmd)
output = run(cmd)
## If they wanted a trace file, then we have to ship it back to ourselves
if trace:
output = "/tmp/hstore/workloads/%s.trace" % project
combine_opts = {
"project": project,
"global.memory": 5000,
"output": output,
"workload": hstore_options["trace"] + "*",
}
LOG.debug("Combine %s workload traces into '%s'" % (project.upper(), output))
combine_opts_cmd = " ".join(map(lambda x: "-D%s=%s" % (x, combine_opts[x]), combine_opts.keys()))
run("ant workload-combine %s" % combine_opts_cmd)
workloads = get(output + ".gz")
## IF
## WITH
## WITH
assert output
return output, workloads
## DEF
## ----------------------------------------------
## __setupInstance__
## ----------------------------------------------
def __setupInstance__(self, inst, build=True, update=True):
need_files = False
with settings(host_string=inst.public_dns_name):
with settings(warn_only=True):
if run("test -d %s" % self.hstore_dir).failed:
with cd(os.path.dirname(self.hstore_dir)):
LOG.debug("Initializing H-Store source code directory for branch '%s'" % self.env["hstore.git_branch"])
run("git clone --branch %s %s %s" % (self.env["hstore.git_branch"], \
self.env["hstore.git_options"], \
self.env["hstore.git"]))
update = True
need_files = True
## WITH
with cd(self.hstore_dir):
run("git checkout %s" % self.env["hstore.git_branch"])
if update:
LOG.debug("Pulling in latest changes for branch '%s'" % self.env["hstore.git_branch"])
run("git checkout -- properties")
run("git pull %s" % self.env["hstore.git_options"])
## Checkout Extra Files
with settings(warn_only=True):
if run("test -d %s" % "files").failed:
LOG.debug("Initializing H-Store research files directory for branch '%s'" % self.env["hstore.git_branch"])
run("ant junit-getfiles")
elif update:
LOG.debug("Pulling in latest research files for branch '%s'" % self.env["hstore.git_branch"])
run("ant junit-getfiles-update")
## IF
## WITH
if build:
LOG.debug("Building H-Store from source code")
if self.env["hstore.clean"]:
run("ant clean-all")
run("ant build")
## WITH
## WITH
run("cd %s" % self.hstore_dir)
## WITH
## DEF
## ----------------------------------------------
## __writeConf__
## ----------------------------------------------
def __writeConf__(self, inst, project, removals=[ ], revertFirst=False):
prefix_include = [ 'site', 'client', 'global', 'benchmark' ]
hstoreConf_updates = { }
hstoreConf_removals = set()
benchmarkConf_updates = { }
benchmarkConf_removals = set()
with settings(host_string=inst.public_dns_name):
for key in self.env.keys():
prefix = key.split(".")[0]
if not prefix in prefix_include: continue
if prefix == "benchmark":
benchmarkConf_updates[key.split(".")[-1]] = self.env[key]
else:
hstoreConf_updates[key] = self.env[key]
## FOR
for key in removals:
prefix = key.split(".")[0]
if not prefix in prefix_include: continue
if prefix == "benchmark":
key = key.split(".")[-1]
assert not key in benchmarkConf_updates, key
benchmarkConf_removals.add(key)
else:
assert not key in hstoreConf_updates, key
hstoreConf_removals.add(key)
## FOR
toUpdate = [
("properties/default.properties", hstoreConf_updates, hstoreConf_removals),
("properties/benchmarks/%s.properties" % project, benchmarkConf_updates, benchmarkConf_removals),
]
with cd(self.hstore_dir):
for _file, _updates, _removals in toUpdate:
if revertFirst:
LOG.info("Reverting '%s'" % _file)
run("git checkout %s -- %s" % (self.env["hstore.git_options"], _file))
self.__updateConf__(inst, _file, _updates, _removals)
## FOR
## WITH
## WITH
## DEF
## ----------------------------------------------
## __updateConf__
## ----------------------------------------------
def __updateConf__(self, inst, conf_file, updates={ }, removals=[ ], noSpaces=False):
LOG.info("Updating configuration file '%s' - Updates[%d] / Removals[%d]", conf_file, len(updates), len(removals))
contents = self.get_file(inst, conf_file)
assert len(contents) > 0, "Configuration file '%s' is empty" % conf_file
first = True
space = "" if noSpaces else " "
## Keys we want to update/insert
for key in sorted(updates.keys()):
val = updates[key]
hstore_line = "%s%s=%s%s" % (key, space, space, val)
regex = "^(?:#)*[\s]*%s[ ]*=[ ]*.*" % re.escape(key)
m = re.search(regex, contents, re.MULTILINE)
if not m:
if first: contents += "\n"
contents += hstore_line + "\n"
first = False
LOG.debug("Added '%s' in %s with value '%s'" % (key, conf_file, val))
else:
contents = contents.replace(m.group(0), hstore_line)
LOG.debug("Updated '%s' in %s with value '%s'" % (key, conf_file, val))
## IF
## FOR
## Keys we need to completely remove from the file
for key in removals:
if contents.find(key) != -1:
regex = "%s[ ]*=.*" % re.escape(key)
contents = re.sub(regex, "", contents)
LOG.debug("Removed '%s' in %s" % (key, conf_file))
## FOR
## FOR
sio = StringIO()
sio.write(contents)
with settings(host_string=inst.public_dns_name):
put(local_path=sio, remote_path=conf_file)
## WITH
## DEF
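## Illustrative sketch (added comment, not part of the original file; the file path and
## values are hypothetical): given a properties file containing the line
##   #site.memory = 1024
## a call such as self.__updateConf__(inst, "properties/default.properties", {"site.memory": 2048})
## rewrites that line to "site.memory = 2048" via the regex above, while a key that is
## not present anywhere in the file is appended as a new "key = value" line at the end.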
def __resetDebugging__(self, inst):
with settings(host_string=inst.public_dns_name):
with cd(self.hstore_dir):
run("git checkout %s -- %s" % (self.env["hstore.git_options"], "log4j.properties"))
## DEF
def __updateLog4j__(self, inst, debug=[], trace=[]):
LOG.info("Updating log4j properties - DEBUG[%d] / TRACE[%d]", len(debug), len(trace))
conf_file = os.path.join(self.hstore_dir, "log4j.properties")
targetLevels = {
"DEBUG": debug,
"TRACE": trace,
}
with settings(host_string=inst.public_dns_name):
contents = self.get_file(inst, conf_file)
assert len(contents) > 0, "Configuration file '%s' is empty" % conf_file
# Go through the file and update anything that is already there
baseRegex = r"(log4j\.logger\.(?:%s))[\s]*=[\s]*(?:INFO|DEBUG|TRACE)(|,[\s]+[\w]+)"
for level, clazzes in targetLevels.iteritems():
contents = re.sub(baseRegex % "|".join(map(string.strip, clazzes)),
r"\1="+level+r"\2",
contents, flags=re.IGNORECASE)
# Then add in anybody that is missing
first = True
for level, clazzes in targetLevels.iteritems():
for clazz in clazzes:
if contents.find(clazz) == -1:
if first: contents += "\n"
contents += "\nlog4j.logger.%s=%s" % (clazz, level)
first = False
## FOR
sio = StringIO()
sio.write(contents)
put(local_path=sio, remote_path=conf_file)
## WITH
## DEF
## ----------------------------------------------
## __clearLogs__
## ----------------------------------------------
def __clearLogs__(self, inst):
"""Remove all of the log files on the remote cluster"""
with settings(host_string=inst.public_dns_name):
with settings(warn_only=True):
LOG.info("Clearing H-Store log files [%s]" % self.env["hstore.git_branch"])
log_dir = self.env.get("site.log_dir", os.path.join(self.hstore_dir, "obj/logs/sites"))
run("rm -rf %s/*" % log_dir)
## WITH
## DEF
## CLASS
|
azureplus/hue
|
refs/heads/master
|
desktop/core/ext-py/guppy-0.1.10/guppy/heapy/test/test_RefPat.py
|
37
|
from guppy.heapy.test import support
import StringIO, sys, types, unittest
class TestCase(support.TestCase):
def setUp(self):
support.TestCase.setUp(self)
self.RefPat = self.heapy.RefPat
self.iso = self.Use.iso
def makegraph(self, width, length):
# Generate a structure which will yield a high number
# of shortest paths.
# Returns a pair src, dst which are connected via a noncyclic graph
# with many edges.
# The length of each path (all shortest), number of edges will be length
# The number of nodes will be 2 + width * (length - 1)
# The number of paths will be
# width ** length, if width >= 1 and length >= 1
dst = []
ls = []
for i in range(width):
ls.append([dst])
ls = [dst] * width
for i in range(length-1):
xs = []
for j in range(width):
ys = []
xs.append(ys)
for k in range(width):
ys.append(ls[k])
ls = xs
src = ls
return src, dst
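# Worked example (added for clarity, not in the original test): makegraph(2, 3)
# builds a graph with 2 + 2*(3-1) = 6 nodes, every shortest path from src to dst
# has length 3 (edges), and there are 2**3 = 8 distinct shortest paths.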
def rp(self, dst, src, **kwds):
iso = self.iso
if src is not None:
src = iso(src)
rp = iso(dst).get_rp(src=src, **kwds)
rp.mod.UniSet.summary_str.str_address = lambda a: '<address>'
return rp
class RefPatCase(TestCase):
def test_basic_methods(self):
# Test basic methods: iteration, indexing, length, tree addressing via attribute access
# Test iteration
dst = src = []
lists = [dst]
for i in range(5):
src = [src]
lists.append(src)
rp = self.rp(dst, src,depth=10)
for i, x in enumerate(rp):
if i < len(lists):
self.asis(lists[i], x.theone)
# Test indexing
# First case, when already iterated over
self.asis( rp[0].theone, lists[0] )
self.asis( rp[-2].theone, lists[-1])
# Second case, when not iterated over before
rp = self.rp(dst, src,depth=10)
self.asis( rp[0].theone, lists[0] )
self.asis( rp[-2].theone, lists[-1])
# Test length
self.aseq( len(rp), len(lists) + 1)
rp = self.rp(dst, src,depth=10)
self.aseq( len(rp), len(lists) + 1)
# Test attribute access
self.asis(rp._.theone, lists[0])
self.asis(rp.a.theone, lists[1])
# Test attribute access, when not iterated over before
rp = self.rp(dst, src,depth=10)
self.asis(rp.a2.theone, lists[2])
self.asis(rp.a.theone, lists[1])
# Make sure attribute access is cached:
# so it doesn't change when struct is changed
lists[2].append(lists[0])
rp.View.clear_retainers()
rp.View.update_referrers(self.iso(lists[0]))
self.asis(rp.a.theone, lists[1])
# Test with recursive structure
dst = []
dst.append(dst)
src = [dst]
rp = self.rp(dst, src)
self.asis(rp._.theone, dst)
self.aseq(rp.a, self.iso(dst, src))
self.aseq(rp.a, rp.a2)
self.aseq(rp.a, rp[1])
def test_presentation(self):
output = StringIO.StringIO()
src = []
def write(x):
print >>output, x
R = self.RefPat
def test_pp(dst, src, result=None, **kwds):
rp = self.rp(dst, src, **kwds)
write( repr(rp) )
return rp
dst = []
src.append(dst)
#print R.refpat(dst=dst)
test_pp(dst, src)
for i in range(5):
x = dst
dst = []
x.append(dst)
test_pp(dst, src)
src, dst = self.makegraph(5,7)
test_pp(dst, src, depth=10)
# Test that pp() prints limited number of lines
src, dst = self.makegraph(5,17)
rp = test_pp(dst, src, depth=17)
write( repr(rp.more) )
# Test more of more
src, dst = self.makegraph(1,30)
rp = test_pp(dst, src, depth=35)
m = rp.more
write( repr(m) )
write( repr(m.more) )
m1 = m.more
write( repr(m1) )
m2 = m.more
write( repr(m2.more) )
write( str(m1.more) ) # Test also that str() is the same as repr()
# Test that we get back to start by .top
write( m1.top )
# Test that we get back to previous by .prev
write( m1.prev )
if 0:
# I don't know if I really want this, after new general output handling
# Test that .top works at the top
write( m1.top.top )
#pdb.set_trace()
# Test that they won't say '...more lines...' if the # of lines is what is printed
src, dst = self.makegraph(1,30)
rp = test_pp(dst, src, depth=10)
# Test how no more lines is printed
write( rp.more)
write( rp.more.more)
# Test that one more line is printed rather than '1 more line'
src, dst = self.makegraph(1,30)
rp = test_pp(dst, src, depth=21)
write( rp.more)
# Test that we can do more without first printing
rp = self.rp(dst, src, depth=20)
write( rp.more )
if 0:
print output.getvalue()
else:
self.aseq(output.getvalue(), """\
Reference Pattern by <[dict of] class>.
0: _ --- [-] 1 list: <address>*0
1: a [-] 1 list: <address>*1
2: aa ---- [R] 1 tuple: <address>*1
Reference Pattern by <[dict of] class>.
0: _ --- [-] 1 list: <address>*0
1: a [-] 1 list: <address>*1
2: aa ---- [-] 1 list: <address>*1
3: a3 [-] 1 list: <address>*1
4: a4 ------ [-] 1 list: <address>*1
5: a5 [-] 1 list: <address>*1
6: a6 -------- [-] 1 list: <address>*1
7: a7 [R] 1 tuple: <address>*1
Reference Pattern by <[dict of] class>.
0: _ --- [-] 1 list: <address>*0
1: a [-] 5 list: <address>*5, <address>*5, <address>*5, <address>*5...
2: aa ---- [-] 5 list: <address>*5, <address>*5, <address>*5, <address>*5...
3: a3 [-] 5 list: <address>*5, <address>*5, <address>*5, <address>*5...
4: a4 ------ [-] 5 list: <address>*5, <address>*5, <address>*5, <address>*5...
5: a5 [-] 5 list: <address>*5, <address>*5, <address>*5, <address>*5...
6: a6 -------- [-] 5 list: <address>*5, <address>*5, <address>*5...
7: a7 [-] 1 list: <address>*5
8: a8 ---------- [R] 1 tuple: <address>*1
Reference Pattern by <[dict of] class>.
0: _ --- [-] 1 list: <address>*0
1: a [-] 5 list: <address>*5, <address>*5, <address>*5, <address>*5...
2: aa ---- [-] 5 list: <address>*5, <address>*5, <address>*5, <address>*5...
3: a3 [-] 5 list: <address>*5, <address>*5, <address>*5, <address>*5...
4: a4 ------ [-] 5 list: <address>*5, <address>*5, <address>*5, <address>*5...
5: a5 [-] 5 list: <address>*5, <address>*5, <address>*5, <address>*5...
6: a6 -------- [-] 5 list: <address>*5, <address>*5, <address>*5...
7: a7 [-] 5 list: <address>*5, <address>*5, <address>*5...
8: a8 ---------- [-] 5 list: <address>*5, <address>*5, <address>*5...
9: a9 [-] 5 list: <address>*5, <address>*5, <address>*5...
<Type e.g. '_.more' for more.>
10: a10 ----------- [-] 5 list: <address>*5, <address>*5, <address>*5...
11: a11 [-] 5 list: <address>*5, <address>*5, <address>*5...
12: a12 ------------- [-] 5 list: <address>*5, <address>*5, <address>*5...
13: a13 [-] 5 list: <address>*5, <address>*5, <address>*5...
14: a14 --------------- [-] 5 list: <address>*5, <address>*5, <address>*5...
15: a15 [-] 5 list: <address>*5, <address>*5, <address>*5...
16: a16 ----------------- [-] 5 list: <address>*5, <address>*5, <address>*5...
17: a17 [+] 1 list: <address>*5
Reference Pattern by <[dict of] class>.
0: _ --- [-] 1 list: <address>*0
1: a [-] 1 list: <address>*1
2: aa ---- [-] 1 list: <address>*1
3: a3 [-] 1 list: <address>*1
4: a4 ------ [-] 1 list: <address>*1
5: a5 [-] 1 list: <address>*1
6: a6 -------- [-] 1 list: <address>*1
7: a7 [-] 1 list: <address>*1
8: a8 ---------- [-] 1 list: <address>*1
9: a9 [-] 1 list: <address>*1
<Type e.g. '_.more' for more.>
10: a10 ----------- [-] 1 list: <address>*1
11: a11 [-] 1 list: <address>*1
12: a12 ------------- [-] 1 list: <address>*1
13: a13 [-] 1 list: <address>*1
14: a14 --------------- [-] 1 list: <address>*1
15: a15 [-] 1 list: <address>*1
16: a16 ----------------- [-] 1 list: <address>*1
17: a17 [-] 1 list: <address>*1
18: a18 ------------------- [-] 1 list: <address>*1
19: a19 [-] 1 list: <address>*1
<Type e.g. '_.more' for more.>
20: a20 --------------------- [-] 1 list: <address>*1
21: a21 [-] 1 list: <address>*1
22: a22 ----------------------- [-] 1 list: <address>*1
23: a23 [-] 1 list: <address>*1
24: a24 ------------------------- [-] 1 list: <address>*1
25: a25 [-] 1 list: <address>*1
26: a26 --------------------------- [-] 1 list: <address>*1
27: a27 [-] 1 list: <address>*1
28: a28 ----------------------------- [-] 1 list: <address>*1
29: a29 [-] 1 list: <address>*1
<Type e.g. '_.more' for more.>
20: a20 --------------------- [-] 1 list: <address>*1
21: a21 [-] 1 list: <address>*1
22: a22 ----------------------- [-] 1 list: <address>*1
23: a23 [-] 1 list: <address>*1
24: a24 ------------------------- [-] 1 list: <address>*1
25: a25 [-] 1 list: <address>*1
26: a26 --------------------------- [-] 1 list: <address>*1
27: a27 [-] 1 list: <address>*1
28: a28 ----------------------------- [-] 1 list: <address>*1
29: a29 [-] 1 list: <address>*1
<Type e.g. '_.more' for more.>
30: a30 ------------------------------- [-] 1 list: <address>*1
31: a31 [R] 1 tuple: <address>*1
30: a30 ------------------------------- [-] 1 list: <address>*1
31: a31 [R] 1 tuple: <address>*1
Reference Pattern by <[dict of] class>.
0: _ --- [-] 1 list: <address>*0
1: a [-] 1 list: <address>*1
2: aa ---- [-] 1 list: <address>*1
3: a3 [-] 1 list: <address>*1
4: a4 ------ [-] 1 list: <address>*1
5: a5 [-] 1 list: <address>*1
6: a6 -------- [-] 1 list: <address>*1
7: a7 [-] 1 list: <address>*1
8: a8 ---------- [-] 1 list: <address>*1
9: a9 [-] 1 list: <address>*1
<22 more lines. Type e.g. '_.more' for more.>
10: a10 ----------- [-] 1 list: <address>*1
11: a11 [-] 1 list: <address>*1
12: a12 ------------- [-] 1 list: <address>*1
13: a13 [-] 1 list: <address>*1
14: a14 --------------- [-] 1 list: <address>*1
15: a15 [-] 1 list: <address>*1
16: a16 ----------------- [-] 1 list: <address>*1
17: a17 [-] 1 list: <address>*1
18: a18 ------------------- [-] 1 list: <address>*1
19: a19 [-] 1 list: <address>*1
<12 more lines. Type e.g. '_.more' for more.>
Reference Pattern by <[dict of] class>.
0: _ --- [-] 1 list: <address>*0
1: a [-] 1 list: <address>*1
2: aa ---- [-] 1 list: <address>*1
3: a3 [-] 1 list: <address>*1
4: a4 ------ [-] 1 list: <address>*1
5: a5 [-] 1 list: <address>*1
6: a6 -------- [-] 1 list: <address>*1
7: a7 [-] 1 list: <address>*1
8: a8 ---------- [-] 1 list: <address>*1
9: a9 [-] 1 list: <address>*1
10: a10 ----------- [+] 1 list: <address>*1
Reference Pattern by <[dict of] class>.
0: _ --- [-] 1 list: <address>*0
1: a [-] 1 list: <address>*1
2: aa ---- [-] 1 list: <address>*1
3: a3 [-] 1 list: <address>*1
4: a4 ------ [-] 1 list: <address>*1
5: a5 [-] 1 list: <address>*1
6: a6 -------- [-] 1 list: <address>*1
7: a7 [-] 1 list: <address>*1
8: a8 ---------- [-] 1 list: <address>*1
9: a9 [-] 1 list: <address>*1
<Type e.g. '_.more' for more.>
10: a10 ----------- [-] 1 list: <address>*1
11: a11 [-] 1 list: <address>*1
12: a12 ------------- [-] 1 list: <address>*1
13: a13 [-] 1 list: <address>*1
14: a14 --------------- [-] 1 list: <address>*1
15: a15 [-] 1 list: <address>*1
16: a16 ----------------- [-] 1 list: <address>*1
17: a17 [-] 1 list: <address>*1
18: a18 ------------------- [-] 1 list: <address>*1
19: a19 [-] 1 list: <address>*1
<Type e.g. '_.more' for more.>
10: a10 ----------- [-] 1 list: <address>*1
11: a11 [-] 1 list: <address>*1
12: a12 ------------- [-] 1 list: <address>*1
13: a13 [-] 1 list: <address>*1
14: a14 --------------- [-] 1 list: <address>*1
15: a15 [-] 1 list: <address>*1
16: a16 ----------------- [-] 1 list: <address>*1
17: a17 [-] 1 list: <address>*1
18: a18 ------------------- [-] 1 list: <address>*1
19: a19 [-] 1 list: <address>*1
20: a20 --------------------- [+] 1 list: <address>*1
""")
def test_referrer_registration(self):
import gc
# The reference pattern should register itself as referrer target
# so that after a gc, the rp target will still be included in the referrer target
# Since the target is passed to referrers and update, it will still find the
# referrers. It is an optimization issue: it should cover the referrers.
# We test this by having two different-typed referrers
# Accessing a referrer of the first one, then gc collecting, then checking that
# the second one can be accessed without update: it was created automatically.
# The test failed when not registering, but succeeded when registering was added.
# It succeeds any case if no GC collection is made.
dst = []
a = [dst]
aa = [a]
b = (dst,)
ba = [b]
src = [aa, ba]
rp = self.rp(dst, src)
self.asis(rp._.theone, dst)
gc.collect()
self.asis(rp.aa.theone, aa)
self.asis(rp.View.rg[b][0], ba)
def test_some_more_advanced_usages(self):
import gc
# Test immediate dominators
dst = []
src = [dst]
src.append([dst])
rp = self.rp(dst, src, depth=10, imdom=1)
self.asis(rp._.theone, dst)
self.asis(rp.a.theone, src)
# Test with mixed types
# In particular, dict owned by an instance
dst = []
class A:
pass
a = A()
a.dst = dst
b = {'dst':dst}
src = (a, b)
gc.collect()
rp = self.rp(dst, src, depth=10)
rp.er.classifier.is_clear_drg_enabled = 0 # Note Apr 19 2005
self.asis(rp.a.theone, b)
self.asis(rp.b.theone, a.__dict__)
# Test that the dict is eventually automatically removed from dictowners -
# First test that dictowners is nonzero
ln = len(rp.mod.View.dict_ownership)
self.assert_(ln > 0)
del src
del a
mod = rp.mod
rp.er.classifier.is_clear_drg_enabled = 1
del rp
# It is cleared after GC
gc.collect()
lnnow = len(mod.View.dict_ownership)
self.assert_(lnnow == 0)
class NewCase(TestCase):
# Some new tests as they come up
def test_reset(self):
# Test the .reset() method
dst = []
a = [dst]
b = [dst]
src = [a,b]
rp = self.rp(dst, src)
self.aseq( rp.a, self.iso(a, b) )
b.pop()
rp.reset()
self.aseq( rp.a, self.iso(a) )
def test_paths(self):
# Test the .paths() method
dst = []
a = [dst]+[None]*40 # Make order well-defined. Note May 2 2005.
b = [dst]
src = [a,b]
rp = self.rp(dst, src)
expected = """\
Paths from source 'a3' to target '_'.
0: a3 [0] @ [0]
1: aa [0] @ [0]
2: a [0] @ [0]
3: _ [0] = <1 list: <address>*0>
4: aa [0] @ [1]
5: a [1] @ [0] -> #3"""
self.aseq( str(rp.paths('a3')), expected)
expected = expected[:expected.index('\n 4:')]
# Test the andsets argument, given as a dict
self.aseq( str(rp.paths('a3', andsets={'a':self.iso(a)})), expected)
# Test the andsets argument, given as a list
self.aseq( str(rp.paths('a3', andsets=[None, None, self.iso(a)])), expected)
def test_main(debug=0):
support.run_unittest(RefPatCase,debug)
support.run_unittest(NewCase,debug)
def test_leak():
# Runs the tests in a loop and prints memory statistics,
# to see if there are underlying low-level memory problems.
# Requires Python to be compiled with debug support.
from guppy.heapy.heapyc import xmemstats
import gc, sys, time
i = 0
xmemstats()
while 1:
print '[%d]'%i, time.asctime()
i += 1
test_main()
gc.collect()
xmemstats()
if __name__ == "__main__":
test_main()
|
syci/OCB
|
refs/heads/9.0
|
addons/decimal_precision/__init__.py
|
47
|
# -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
#import decimal_precision
from decimal_precision import get_precision
|
alex/html5lib-python
|
refs/heads/master
|
html5lib/treewalkers/__init__.py
|
499
|
"""A collection of modules for iterating through different kinds of
tree, generating tokens identical to those produced by the tokenizer
module.
To create a tree walker for a new type of tree, you need to
implement a tree walker object (called TreeWalker by convention) that
implements a 'serialize' method taking a tree as sole argument and
returning an iterator generating tokens.
"""
from __future__ import absolute_import, division, unicode_literals
__all__ = ["getTreeWalker", "pprint", "dom", "etree", "genshistream", "lxmletree",
"pulldom"]
import sys
from .. import constants
from ..utils import default_etree
treeWalkerCache = {}
def getTreeWalker(treeType, implementation=None, **kwargs):
"""Get a TreeWalker class for various types of tree with built-in support
treeType - the name of the tree type required (case-insensitive). Supported
values are:
"dom" - The xml.dom.minidom DOM implementation
"pulldom" - The xml.dom.pulldom event stream
"etree" - A generic walker for tree implementations exposing an
elementtree-like interface (known to work with
ElementTree, cElementTree and lxml.etree).
"lxml" - Optimized walker for lxml.etree
"genshi" - a Genshi stream
implementation - (Currently applies to the "etree" tree type only). A module
implementing the tree type e.g. xml.etree.ElementTree or
cElementTree."""
treeType = treeType.lower()
if treeType not in treeWalkerCache:
if treeType in ("dom", "pulldom"):
name = "%s.%s" % (__name__, treeType)
__import__(name)
mod = sys.modules[name]
treeWalkerCache[treeType] = mod.TreeWalker
elif treeType == "genshi":
from . import genshistream
treeWalkerCache[treeType] = genshistream.TreeWalker
elif treeType == "lxml":
from . import lxmletree
treeWalkerCache[treeType] = lxmletree.TreeWalker
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# XXX: NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeWalker
return treeWalkerCache.get(treeType)
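# Illustrative usage sketch (added comment, not part of the module; the document and
# variable names are hypothetical): walking a parsed document with the generic etree
# walker and printing each token's type.
#
#   import html5lib
#   doc = html5lib.parse("<p>Hello</p>")          # etree-based tree by default
#   walker = getTreeWalker("etree")
#   for token in walker(doc):
#       print(token["type"])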
def concatenateCharacterTokens(tokens):
pendingCharacters = []
for token in tokens:
type = token["type"]
if type in ("Characters", "SpaceCharacters"):
pendingCharacters.append(token["data"])
else:
if pendingCharacters:
yield {"type": "Characters", "data": "".join(pendingCharacters)}
pendingCharacters = []
yield token
if pendingCharacters:
yield {"type": "Characters", "data": "".join(pendingCharacters)}
def pprint(walker):
"""Pretty printer for tree walkers"""
output = []
indent = 0
for token in concatenateCharacterTokens(walker):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
# tag name
if token["namespace"] and token["namespace"] != constants.namespaces["html"]:
if token["namespace"] in constants.prefixes:
ns = constants.prefixes[token["namespace"]]
else:
ns = token["namespace"]
name = "%s %s" % (ns, token["name"])
else:
name = token["name"]
output.append("%s<%s>" % (" " * indent, name))
indent += 2
# attributes (sorted for consistent ordering)
attrs = token["data"]
for (namespace, localname), value in sorted(attrs.items()):
if namespace:
if namespace in constants.prefixes:
ns = constants.prefixes[namespace]
else:
ns = namespace
name = "%s %s" % (ns, localname)
else:
name = localname
output.append("%s%s=\"%s\"" % (" " * indent, name, value))
# self-closing
if type == "EmptyTag":
indent -= 2
elif type == "EndTag":
indent -= 2
elif type == "Comment":
output.append("%s<!-- %s -->" % (" " * indent, token["data"]))
elif type == "Doctype":
if token["name"]:
if token["publicId"]:
output.append("""%s<!DOCTYPE %s "%s" "%s">""" %
(" " * indent,
token["name"],
token["publicId"],
token["systemId"] if token["systemId"] else ""))
elif token["systemId"]:
output.append("""%s<!DOCTYPE %s "" "%s">""" %
(" " * indent,
token["name"],
token["systemId"]))
else:
output.append("%s<!DOCTYPE %s>" % (" " * indent,
token["name"]))
else:
output.append("%s<!DOCTYPE >" % (" " * indent,))
elif type == "Characters":
output.append("%s\"%s\"" % (" " * indent, token["data"]))
elif type == "SpaceCharacters":
assert False, "concatenateCharacterTokens should have got rid of all Space tokens"
else:
raise ValueError("Unknown token type, %s" % type)
return "\n".join(output)
|
zirou30/python_student
|
refs/heads/master
|
87.py
|
1
|
ingredientes = ['queijo', 'tomate', 'calabresa']
for ingrediente in ingredientes:
if ingrediente == 'tomate':
print('Desculpa estamos sem ' + ingredientes[1])
else:
print('Adicionado ' + ingrediente + '.')
print('Pizza Pronta')
|
johnw424/airflow
|
refs/heads/master
|
airflow/operators/__init__.py
|
8
|
'''
Imports operators dynamically while keeping the package API clean,
abstracting the underlying modules
'''
from airflow.utils import import_module_attrs as _import_module_attrs
# These need to be integrated first as other operators depend on them
_import_module_attrs(globals(), {
'check_operator': [
'CheckOperator',
'ValueCheckOperator',
'IntervalCheckOperator',
],
})
_operators = {
'bash_operator': ['BashOperator'],
'python_operator': ['PythonOperator', 'BranchPythonOperator'],
'hive_operator': ['HiveOperator'],
'presto_check_operator': [
'PrestoCheckOperator',
'PrestoValueCheckOperator',
'PrestoIntervalCheckOperator',
],
'dummy_operator': ['DummyOperator'],
'email_operator': ['EmailOperator'],
'hive_to_samba_operator': ['Hive2SambaOperator'],
'mysql_operator': ['MySqlOperator'],
'sqlite_operator': ['SqliteOperator'],
'mysql_to_hive': ['MySqlToHiveTransfer'],
'postgres_operator': ['PostgresOperator'],
'sensors': [
'SqlSensor',
'ExternalTaskSensor',
'HivePartitionSensor',
'S3KeySensor',
'S3PrefixSensor',
'HdfsSensor',
'TimeSensor',
'TimeDeltaSensor',
'HttpSensor'
],
'subdag_operator': ['SubDagOperator'],
'hive_stats_operator': ['HiveStatsCollectionOperator'],
's3_to_hive_operator': ['S3ToHiveTransfer'],
'hive_to_mysql': ['HiveToMySqlTransfer'],
's3_file_transform_operator': ['S3FileTransformOperator'],
'http_operator': ['SimpleHttpOperator'],
'hive_to_druid': ['HiveToDruidTransfer'],
'jdbc_operator': ['JdbcOperator'],
'mssql_operator': ['MsSqlOperator'],
'mssql_to_hive': ['MsSqlToHiveTransfer'],
'slack_operator': ['SlackAPIPostOperator'],
'generic_transfer': ['GenericTransfer'],
}
_import_module_attrs(globals(), _operators)
from airflow.models import BaseOperator
def integrate_plugins():
"""Integrate plugins to the context"""
from airflow.plugins_manager import operators as _operators
for _operator in _operators:
globals()[_operator.__name__] = _operator
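# Usage note (added comment, not in the original file): because _import_module_attrs
# copies the operator classes listed above into this package's globals, DAG files can
# simply do
#   from airflow.operators import BashOperator, PythonOperator
# instead of importing each submodule explicitly.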
|
scalable-networks/gnuradio-3.7.2.1
|
refs/heads/master
|
grc/python/Platform.py
|
7
|
__doc__ = """
Copyright 2008-2011 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import os
from gnuradio import gr
from .. base.Platform import Platform as _Platform
from .. gui.Platform import Platform as _GUIPlatform
from FlowGraph import FlowGraph as _FlowGraph
from Connection import Connection as _Connection
from Block import Block as _Block
from Port import Port as _Port
from Param import Param as _Param
from Generator import Generator
from Constants import \
HIER_BLOCKS_LIB_DIR, BLOCK_DTD, \
DEFAULT_FLOW_GRAPH, BLOCKS_DIRS
import Constants
COLORS = [(name, color) for name, key, sizeof, color in Constants.CORE_TYPES]
class Platform(_Platform, _GUIPlatform):
def __init__(self):
"""
Make a platform for gnuradio.
"""
#ensure hier dir
if not os.path.exists(HIER_BLOCKS_LIB_DIR): os.mkdir(HIER_BLOCKS_LIB_DIR)
#convert block paths to absolute paths
block_paths = set(map(os.path.abspath, BLOCKS_DIRS))
#init
_Platform.__init__(
self,
name='GNU Radio Companion',
version=gr.version(),
key='grc',
license=__doc__.strip(),
website='http://gnuradio.org/redmine/wiki/gnuradio/GNURadioCompanion',
block_paths=block_paths,
block_dtd=BLOCK_DTD,
default_flow_graph=DEFAULT_FLOW_GRAPH,
generator=Generator,
colors=COLORS,
)
_GUIPlatform.__init__(self)
##############################################
# Constructors
##############################################
FlowGraph = _FlowGraph
Connection = _Connection
Block = _Block
Port = _Port
Param = _Param
|
heran7/edx-platform
|
refs/heads/master
|
common/lib/xmodule/xmodule/tests/test_html_module.py
|
61
|
import unittest
from mock import Mock
from xblock.field_data import DictFieldData
from xmodule.html_module import HtmlModule
from . import get_test_system
class HtmlModuleSubstitutionTestCase(unittest.TestCase):
descriptor = Mock()
def test_substitution_works(self):
sample_xml = '''%%USER_ID%%'''
field_data = DictFieldData({'data': sample_xml})
module_system = get_test_system()
module = HtmlModule(self.descriptor, module_system, field_data, Mock())
self.assertEqual(module.get_html(), str(module_system.anonymous_student_id))
def test_substitution_without_magic_string(self):
sample_xml = '''
<html>
<p>Hi USER_ID!11!</p>
</html>
'''
field_data = DictFieldData({'data': sample_xml})
module_system = get_test_system()
module = HtmlModule(self.descriptor, module_system, field_data, Mock())
self.assertEqual(module.get_html(), sample_xml)
def test_substitution_without_anonymous_student_id(self):
sample_xml = '''%%USER_ID%%'''
field_data = DictFieldData({'data': sample_xml})
module_system = get_test_system()
module_system.anonymous_student_id = None
module = HtmlModule(self.descriptor, module_system, field_data, Mock())
self.assertEqual(module.get_html(), sample_xml)
|
staticlibs/android-ndk-r9d-arm-linux-androideabi-4.8
|
refs/heads/master
|
lib/python2.7/test/test_codecmaps_cn.py
|
150
|
#!/usr/bin/env python
#
# test_codecmaps_cn.py
# Codec mapping tests for PRC encodings
#
from test import test_support
from test import test_multibytecodec_support
import unittest
class TestGB2312Map(test_multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'gb2312'
mapfileurl = 'http://people.freebsd.org/~perky/i18n/EUC-CN.TXT'
class TestGBKMap(test_multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'gbk'
mapfileurl = 'http://www.unicode.org/Public/MAPPINGS/VENDORS/' \
'MICSFT/WINDOWS/CP936.TXT'
class TestGB18030Map(test_multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'gb18030'
mapfileurl = 'http://source.icu-project.org/repos/icu/data/' \
'trunk/charset/data/xml/gb-18030-2000.xml'
def test_main():
test_support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
|
hhkaos/awesome-arcgis
|
refs/heads/master
|
node_modules/gitbook-cli/node_modules/npmi/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/gypsh.py
|
2779
|
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypsh output module
gypsh is a GYP shell. It's not really a generator per se. All it does is
fire up an interactive Python session with a few local variables set to the
variables passed to the generator. Like gypd, it's intended as a debugging
aid, to facilitate the exploration of .gyp structures after being processed
by the input module.
The expected usage is "gyp -f gypsh -D OS=desired_os".
"""
import code
import sys
# All of this stuff about generator variables was lovingly ripped from gypd.py.
# That module has a much better description of what's going on and why.
_generator_identity_variables = [
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
]
generator_default_variables = {
}
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
locals = {
'target_list': target_list,
'target_dicts': target_dicts,
'data': data,
}
# Use a banner that looks like the stock Python one and like what
# code.interact uses by default, but tack on something to indicate what
# locals are available, and identify gypsh.
banner='Python %s on %s\nlocals.keys() = %s\ngypsh' % \
(sys.version, sys.platform, repr(sorted(locals.keys())))
code.interact(banner, local=locals)
|
woodshop/chainer
|
refs/heads/master
|
tests/functions_tests/test_softmax.py
|
4
|
import unittest
import numpy
import six
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
if cuda.available:
cuda.init()
class TestSoftmax(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
def check_forward(self, x_data, use_cudnn=True):
x = chainer.Variable(x_data)
y = functions.softmax(x, use_cudnn)
self.assertEqual(y.data.dtype, numpy.float32)
y_expect = numpy.exp(self.x)
for i in six.moves.range(y_expect.shape[0]):
y_expect[i] /= y_expect[i].sum()
gradient_check.assert_allclose(y_expect, y.data)
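# (Added comment) The expected values above follow the softmax definition
# softmax(x)_i = exp(x_i) / sum_j exp(x_j), computed row by row.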
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.cudnn
@condition.retry(3)
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
@attr.gpu
@condition.retry(3)
def test_forward_gpu_no_cudnn(self):
self.check_forward(cuda.to_gpu(self.x), False)
def check_backward(self, x_data, gy_data, use_cudnn=True):
x = chainer.Variable(x_data)
y = functions.softmax(x, use_cudnn)
y.grad = gy_data
y.backward()
func = y.creator
f = lambda: func.forward((x.data,))
gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,), eps=1e-2)
gradient_check.assert_allclose(gx, x.grad)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.cudnn
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.gpu
@condition.retry(3)
def test_backward_gpu_no_cudnn(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy), False)
class TestReplicatedSoftmax1(TestSoftmax):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1, (2, 3, 4)).astype(numpy.float32)
def check_forward(self, x_data, use_cudnn=True):
x = chainer.Variable(x_data)
y = functions.softmax(x, use_cudnn)
self.assertEqual(y.data.dtype, numpy.float32)
y_expect = numpy.exp(self.x)
for i in six.moves.range(y_expect.shape[0]):
for k in six.moves.range(y_expect.shape[2]):
y_expect[i, :, k] /= y_expect[i, :, k].sum()
gradient_check.assert_allclose(y_expect, y.data)
class TestReplicatedSoftmax2(TestSoftmax):
def setUp(self):
self.x = numpy.random.uniform(
-1, 1, (2, 3, 4, 5)).astype(numpy.float32)
self.gy = numpy.random.uniform(
-1, 1, (2, 3, 4, 5)).astype(numpy.float32)
def check_forward(self, x_data, use_cudnn=True):
x = chainer.Variable(x_data)
y = functions.softmax(x, use_cudnn)
self.assertEqual(y.data.dtype, numpy.float32)
y_expect = numpy.exp(self.x)
for i in six.moves.range(y_expect.shape[0]):
for k in six.moves.range(y_expect.shape[2]):
for l in six.moves.range(y_expect.shape[3]):
y_expect[i, :, k, l] /= y_expect[i, :, k, l].sum()
gradient_check.assert_allclose(y_expect, y.data)
testing.run_module(__name__, __file__)
|
StuartMacKay/checklists_scrapers
|
refs/heads/master
|
checklists_scrapers/tests/scripts/validate_worldbirds_scraper.py
|
1
|
"""
validate_worldbirds_scraper.py
This script is used to validate the scraper that downloads checklists from a
WorldBirds server. Once the scraper has completed, the suite of tests in the
module checklists_scrapers.tests.sites is executed to verify that all the
information is extracted correctly from the site.
To run the tests on the checklists downloaded by the WorldBirds scraper, run
the script as follows:
python validate_worldbirds_scraper.py <username> <password> <country>
where,
<username> is the username for the account on the server.
<password> is the password for the account on the server.
<country> is the country code that identifies the server to access.
"""
import json
import nose
import shutil
import sys
import tempfile
from scrapy.settings import CrawlerSettings
from checklists_scrapers import settings
from checklists_scrapers.spiders.worldbirds_spider import WorldBirdsSpider
from checklists_scrapers.tests.utils import RunCrawler
from checklists_scrapers.utils import list_files
from checklists_scrapers.tests.validation import checklists
settings.DOWNLOAD_DIR = tempfile.mkdtemp()
settings.REPORT_RECIPIENTS = ''
username = sys.argv[1]
password = sys.argv[2]
country = sys.argv[3]
spider = WorldBirdsSpider(username=username, password=password, country=country)
RunCrawler(CrawlerSettings(settings)).crawl(spider)
for path in list_files(settings.DOWNLOAD_DIR, 'json'):
with open(path, 'rb') as fp:
checklists.append(json.load(fp))
nose.run(argv=['checklists_scrapers.tests.validation'])
shutil.rmtree(settings.DOWNLOAD_DIR)
|
vladmm/intellij-community
|
refs/heads/master
|
python/testData/inspections/AddCallSuperOptionalAndRequiredParamsNameCollision.py
|
79
|
class A:
def __init__(self, a):
pass
class B(A):
def <warning descr="Call to __init__ of super class is missed">__init_<caret>_</warning>(self, a=1):
pass
|
gcode-mirror/audacity
|
refs/heads/master
|
lib-src/lv2/lv2/plugins/eg01-amp.lv2/waflib/Tools/irixcc.py
|
330
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os
from waflib import Utils
from waflib.Tools import ccroot,ar
from waflib.Configure import conf
@conf
def find_irixcc(conf):
v=conf.env
cc=None
if v['CC']:cc=v['CC']
elif'CC'in conf.environ:cc=conf.environ['CC']
if not cc:cc=conf.find_program('cc',var='CC')
if not cc:conf.fatal('irixcc was not found')
cc=conf.cmd_to_list(cc)
try:
conf.cmd_and_log(cc+['-version'])
except Exception:
conf.fatal('%r -version could not be executed'%cc)
v['CC']=cc
v['CC_NAME']='irix'
@conf
def irixcc_common_flags(conf):
v=conf.env
v['CC_SRC_F']=''
v['CC_TGT_F']=['-c','-o']
v['CPPPATH_ST']='-I%s'
v['DEFINES_ST']='-D%s'
if not v['LINK_CC']:v['LINK_CC']=v['CC']
v['CCLNK_SRC_F']=''
v['CCLNK_TGT_F']=['-o']
v['LIB_ST']='-l%s'
v['LIBPATH_ST']='-L%s'
v['STLIB_ST']='-l%s'
v['STLIBPATH_ST']='-L%s'
v['cprogram_PATTERN']='%s'
v['cshlib_PATTERN']='lib%s.so'
v['cstlib_PATTERN']='lib%s.a'
def configure(conf):
conf.find_irixcc()
conf.find_cpp()
conf.find_ar()
conf.irixcc_common_flags()
conf.cc_load_tools()
conf.cc_add_flags()
conf.link_add_flags()
|
sgarrity/bedrock
|
refs/heads/master
|
bedrock/newsletter/redirects.py
|
4
|
from bedrock.redirects.util import redirect
redirectpatterns = (
# bug 926629
redirect(r'^newsletter/about_mobile(?:/(?:index\.html)?)?$', 'newsletter.subscribe'),
redirect(r'^newsletter/about_mozilla(?:/(?:index\.html)?)?$', 'mozorg.contribute.index'),
redirect(r'^newsletter/new(?:/(?:index\.html)?)?$', 'newsletter.subscribe'),
redirect(r'^newsletter/ios(?:/(?:index\.html)?)?$', 'firefox.mobile.index'),
)
|
creativeprogramming/ajenti
|
refs/heads/master
|
plugins/services/groups.py
|
12
|
from ajenti.api import *
from ajenti.com import *
class ServiceGroups (Plugin):
def __init__(self):
self.read()
def read(self):
if not self.app.config.has_section('services'):
self.app.config.add_section('services')
r = {}
names = {}
content = {}
for n in self.app.config.options('services'):
if n.startswith('groupname-'):
names[n.split('-')[1]] = self.app.config.get('services', n)
if n.startswith('groupcontent-'):
content[n.split('-')[1]] = self.app.config.get('services', n)
for n in names.keys():
r[names[n]] = content[n].split(' ')
self.groups = r
def save(self):
if self.app.config.has_section('services'):
self.app.config.remove_section('services')
self.app.config.add_section('services')
idx = 0
for i in self.groups.keys():
self.app.config.set('services', 'groupname-%i'%idx, i)
self.app.config.set('services', 'groupcontent-%i'%idx, ' '.join(self.groups[i]))
idx += 1
self.app.config.save()
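# Illustrative sketch of the configuration layout produced by save() (added comment;
# the group name and services are hypothetical, and the ini-style rendering assumes
# the usual ConfigParser-like backend):
#   [services]
#   groupname-0 = web
#   groupcontent-0 = nginx php5-fpm
# read() reverses this mapping into {'web': ['nginx', 'php5-fpm']}.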
|
israeleriston/scientific-week
|
refs/heads/master
|
backend/venv/lib/python3.5/site-packages/pip/req/req_set.py
|
338
|
from __future__ import absolute_import
from collections import defaultdict
from itertools import chain
import logging
import os
from pip._vendor import pkg_resources
from pip._vendor import requests
from pip.compat import expanduser
from pip.download import (is_file_url, is_dir_url, is_vcs_url, url_to_path,
unpack_url)
from pip.exceptions import (InstallationError, BestVersionAlreadyInstalled,
DistributionNotFound, PreviousBuildDirError,
HashError, HashErrors, HashUnpinned,
DirectoryUrlHashUnsupported, VcsHashUnsupported,
UnsupportedPythonVersion)
from pip.req.req_install import InstallRequirement
from pip.utils import (
display_path, dist_in_usersite, ensure_dir, normalize_path)
from pip.utils.hashes import MissingHashes
from pip.utils.logging import indent_log
from pip.utils.packaging import check_dist_requires_python
from pip.vcs import vcs
from pip.wheel import Wheel
logger = logging.getLogger(__name__)
class Requirements(object):
def __init__(self):
self._keys = []
self._dict = {}
def keys(self):
return self._keys
def values(self):
return [self._dict[key] for key in self._keys]
def __contains__(self, item):
return item in self._keys
def __setitem__(self, key, value):
if key not in self._keys:
self._keys.append(key)
self._dict[key] = value
def __getitem__(self, key):
return self._dict[key]
def __repr__(self):
values = ['%s: %s' % (repr(k), repr(self[k])) for k in self.keys()]
return 'Requirements({%s})' % ', '.join(values)
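# Illustrative behaviour sketch (added comment, not part of pip; the string values
# stand in for InstallRequirement objects): Requirements acts as a small
# insertion-ordered mapping, so
#   reqs = Requirements(); reqs['flask'] = 'req-a'; reqs['jinja2'] = 'req-b'
# keeps reqs.keys() == ['flask', 'jinja2'] in the order the requirements were added.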
class DistAbstraction(object):
"""Abstracts out the wheel vs non-wheel prepare_files logic.
The requirements for anything installable are as follows:
- we must be able to determine the requirement name
(or we can't correctly handle the non-upgrade case).
- we must be able to generate a list of run-time dependencies
without installing any additional packages (or we would
have to either burn time by doing temporary isolated installs
or alternatively violate pip's 'don't start installing unless
all requirements are available' rule - neither of which are
desirable).
- for packages with setup requirements, we must also be able
to determine their requirements without installing additional
packages (for the same reason as run-time dependencies)
- we must be able to create a Distribution object exposing the
above metadata.
"""
def __init__(self, req_to_install):
self.req_to_install = req_to_install
def dist(self, finder):
"""Return a setuptools Dist object."""
raise NotImplementedError(self.dist)
def prep_for_dist(self):
"""Ensure that we can get a Dist for this requirement."""
raise NotImplementedError(self.dist)
def make_abstract_dist(req_to_install):
"""Factory to make an abstract dist object.
Preconditions: Either an editable req with a source_dir, or satisfied_by or
a wheel link, or a non-editable req with a source_dir.
:return: A concrete DistAbstraction.
"""
if req_to_install.editable:
return IsSDist(req_to_install)
elif req_to_install.link and req_to_install.link.is_wheel:
return IsWheel(req_to_install)
else:
return IsSDist(req_to_install)
class IsWheel(DistAbstraction):
def dist(self, finder):
return list(pkg_resources.find_distributions(
self.req_to_install.source_dir))[0]
def prep_for_dist(self):
# FIXME:https://github.com/pypa/pip/issues/1112
pass
class IsSDist(DistAbstraction):
def dist(self, finder):
dist = self.req_to_install.get_dist()
# FIXME: shouldn't be globally added:
if dist.has_metadata('dependency_links.txt'):
finder.add_dependency_links(
dist.get_metadata_lines('dependency_links.txt')
)
return dist
def prep_for_dist(self):
self.req_to_install.run_egg_info()
self.req_to_install.assert_source_matches_version()
class Installed(DistAbstraction):
def dist(self, finder):
return self.req_to_install.satisfied_by
def prep_for_dist(self):
pass
class RequirementSet(object):
def __init__(self, build_dir, src_dir, download_dir, upgrade=False,
upgrade_strategy=None, ignore_installed=False, as_egg=False,
target_dir=None, ignore_dependencies=False,
force_reinstall=False, use_user_site=False, session=None,
pycompile=True, isolated=False, wheel_download_dir=None,
wheel_cache=None, require_hashes=False,
ignore_requires_python=False):
"""Create a RequirementSet.
:param wheel_download_dir: Where still-packed .whl files should be
written to. If None they are written to the download_dir parameter.
Separate to download_dir to permit only keeping wheel archives for
pip wheel.
:param download_dir: Where still packed archives should be written to.
If None they are not saved, and are deleted immediately after
unpacking.
:param wheel_cache: The pip wheel cache, for passing to
InstallRequirement.
"""
if session is None:
raise TypeError(
"RequirementSet() missing 1 required keyword argument: "
"'session'"
)
self.build_dir = build_dir
self.src_dir = src_dir
# XXX: download_dir and wheel_download_dir overlap semantically and may
# be combined if we're willing to have non-wheel archives present in
# the wheelhouse output by 'pip wheel'.
self.download_dir = download_dir
self.upgrade = upgrade
self.upgrade_strategy = upgrade_strategy
self.ignore_installed = ignore_installed
self.force_reinstall = force_reinstall
self.requirements = Requirements()
# Mapping of alias: real_name
self.requirement_aliases = {}
self.unnamed_requirements = []
self.ignore_dependencies = ignore_dependencies
self.ignore_requires_python = ignore_requires_python
self.successfully_downloaded = []
self.successfully_installed = []
self.reqs_to_cleanup = []
self.as_egg = as_egg
self.use_user_site = use_user_site
self.target_dir = target_dir # set from --target option
self.session = session
self.pycompile = pycompile
self.isolated = isolated
if wheel_download_dir:
wheel_download_dir = normalize_path(wheel_download_dir)
self.wheel_download_dir = wheel_download_dir
self._wheel_cache = wheel_cache
self.require_hashes = require_hashes
# Maps from install_req -> dependencies_of_install_req
self._dependencies = defaultdict(list)
def __str__(self):
reqs = [req for req in self.requirements.values()
if not req.comes_from]
reqs.sort(key=lambda req: req.name.lower())
return ' '.join([str(req.req) for req in reqs])
def __repr__(self):
reqs = [req for req in self.requirements.values()]
reqs.sort(key=lambda req: req.name.lower())
reqs_str = ', '.join([str(req.req) for req in reqs])
return ('<%s object; %d requirement(s): %s>'
% (self.__class__.__name__, len(reqs), reqs_str))
def add_requirement(self, install_req, parent_req_name=None,
extras_requested=None):
"""Add install_req as a requirement to install.
:param parent_req_name: The name of the requirement that needed this
added. The name is used because when multiple unnamed requirements
resolve to the same name, we could otherwise end up with dependency
links that point outside the Requirements set. parent_req must
already be added. Note that None implies that this is a user
supplied requirement, vs an inferred one.
:param extras_requested: an iterable of extras used to evaluate the
environment markers.
:return: Additional requirements to scan. That is either [] if
the requirement is not applicable, or [install_req] if the
requirement is applicable and has just been added.
"""
name = install_req.name
if not install_req.match_markers(extras_requested):
logger.warning("Ignoring %s: markers '%s' don't match your "
"environment", install_req.name,
install_req.markers)
return []
# This check has to come after we filter requirements with the
# environment markers.
if install_req.link and install_req.link.is_wheel:
wheel = Wheel(install_req.link.filename)
if not wheel.supported():
raise InstallationError(
"%s is not a supported wheel on this platform." %
wheel.filename
)
install_req.as_egg = self.as_egg
install_req.use_user_site = self.use_user_site
install_req.target_dir = self.target_dir
install_req.pycompile = self.pycompile
install_req.is_direct = (parent_req_name is None)
if not name:
# url or path requirement w/o an egg fragment
self.unnamed_requirements.append(install_req)
return [install_req]
else:
try:
existing_req = self.get_requirement(name)
except KeyError:
existing_req = None
if (parent_req_name is None and existing_req and not
existing_req.constraint and
existing_req.extras == install_req.extras and not
existing_req.req.specifier == install_req.req.specifier):
raise InstallationError(
'Double requirement given: %s (already in %s, name=%r)'
% (install_req, existing_req, name))
if not existing_req:
# Add requirement
self.requirements[name] = install_req
# FIXME: what about other normalizations? E.g., _ vs. -?
if name.lower() != name:
self.requirement_aliases[name.lower()] = name
result = [install_req]
else:
# Assume there's no need to scan, and that we've already
# encountered this for scanning.
result = []
if not install_req.constraint and existing_req.constraint:
if (install_req.link and not (existing_req.link and
install_req.link.path == existing_req.link.path)):
self.reqs_to_cleanup.append(install_req)
raise InstallationError(
"Could not satisfy constraints for '%s': "
"installation from path or url cannot be "
"constrained to a version" % name)
# If we're now installing a constraint, mark the existing
# object for real installation.
existing_req.constraint = False
existing_req.extras = tuple(
sorted(set(existing_req.extras).union(
set(install_req.extras))))
logger.debug("Setting %s extras to: %s",
existing_req, existing_req.extras)
# And now we need to scan this.
result = [existing_req]
# Canonicalise to the already-added object for the backref
# check below.
install_req = existing_req
if parent_req_name:
parent_req = self.get_requirement(parent_req_name)
self._dependencies[parent_req].append(install_req)
return result
def has_requirement(self, project_name):
name = project_name.lower()
if (name in self.requirements and
not self.requirements[name].constraint or
name in self.requirement_aliases and
not self.requirements[self.requirement_aliases[name]].constraint):
return True
return False
@property
def has_requirements(self):
return list(req for req in self.requirements.values() if not
req.constraint) or self.unnamed_requirements
@property
def is_download(self):
if self.download_dir:
self.download_dir = expanduser(self.download_dir)
if os.path.exists(self.download_dir):
return True
else:
logger.critical('Could not find download directory')
raise InstallationError(
"Could not find or access download directory '%s'"
% display_path(self.download_dir))
return False
def get_requirement(self, project_name):
for name in project_name, project_name.lower():
if name in self.requirements:
return self.requirements[name]
if name in self.requirement_aliases:
return self.requirements[self.requirement_aliases[name]]
raise KeyError("No project with the name %r" % project_name)
def uninstall(self, auto_confirm=False):
for req in self.requirements.values():
if req.constraint:
continue
req.uninstall(auto_confirm=auto_confirm)
req.commit_uninstall()
def prepare_files(self, finder):
"""
Prepare process. Create temp directories, download and/or unpack files.
"""
# make the wheelhouse
if self.wheel_download_dir:
ensure_dir(self.wheel_download_dir)
# If any top-level requirement has a hash specified, enter
# hash-checking mode, which requires hashes from all.
        root_reqs = self.unnamed_requirements + list(self.requirements.values())
require_hashes = (self.require_hashes or
any(req.has_hash_options for req in root_reqs))
if require_hashes and self.as_egg:
raise InstallationError(
'--egg is not allowed with --require-hashes mode, since it '
'delegates dependency resolution to setuptools and could thus '
'result in installation of unhashed packages.')
# Actually prepare the files, and collect any exceptions. Most hash
# exceptions cannot be checked ahead of time, because
# req.populate_link() needs to be called before we can make decisions
# based on link type.
discovered_reqs = []
hash_errors = HashErrors()
for req in chain(root_reqs, discovered_reqs):
try:
discovered_reqs.extend(self._prepare_file(
finder,
req,
require_hashes=require_hashes,
ignore_dependencies=self.ignore_dependencies))
except HashError as exc:
exc.req = req
hash_errors.append(exc)
if hash_errors:
raise hash_errors
def _is_upgrade_allowed(self, req):
return self.upgrade and (
self.upgrade_strategy == "eager" or (
self.upgrade_strategy == "only-if-needed" and req.is_direct
)
)
def _check_skip_installed(self, req_to_install, finder):
"""Check if req_to_install should be skipped.
This will check if the req is installed, and whether we should upgrade
or reinstall it, taking into account all the relevant user options.
After calling this req_to_install will only have satisfied_by set to
None if the req_to_install is to be upgraded/reinstalled etc. Any
other value will be a dist recording the current thing installed that
satisfies the requirement.
Note that for vcs urls and the like we can't assess skipping in this
routine - we simply identify that we need to pull the thing down,
then later on it is pulled down and introspected to assess upgrade/
reinstalls etc.
:return: A text reason for why it was skipped, or None.
"""
# Check whether to upgrade/reinstall this req or not.
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
upgrade_allowed = self._is_upgrade_allowed(req_to_install)
            # Is the best version already installed?
best_installed = False
if upgrade_allowed:
                # For link-based requirements we have to pull the
                # tree down and inspect it to assess the version, so
                # it's handled further down.
if not (self.force_reinstall or req_to_install.link):
try:
finder.find_requirement(
req_to_install, upgrade_allowed)
except BestVersionAlreadyInstalled:
best_installed = True
except DistributionNotFound:
# No distribution found, so we squash the
# error - it will be raised later when we
                        # re-try the install.
# Why don't we just raise here?
pass
if not best_installed:
# don't uninstall conflict if user install and
# conflict is not user install
if not (self.use_user_site and not
dist_in_usersite(req_to_install.satisfied_by)):
req_to_install.conflicts_with = \
req_to_install.satisfied_by
req_to_install.satisfied_by = None
# Figure out a nice message to say why we're skipping this.
if best_installed:
skip_reason = 'already up-to-date'
elif self.upgrade_strategy == "only-if-needed":
skip_reason = 'not upgraded as not directly required'
else:
skip_reason = 'already satisfied'
return skip_reason
else:
return None
def _prepare_file(self,
finder,
req_to_install,
require_hashes=False,
ignore_dependencies=False):
"""Prepare a single requirements file.
:return: A list of additional InstallRequirements to also install.
"""
# Tell user what we are doing for this requirement:
# obtain (editable), skipping, processing (local url), collecting
# (remote url or package name)
if req_to_install.constraint or req_to_install.prepared:
return []
req_to_install.prepared = True
# ###################### #
# # print log messages # #
# ###################### #
if req_to_install.editable:
logger.info('Obtaining %s', req_to_install)
else:
# satisfied_by is only evaluated by calling _check_skip_installed,
# so it must be None here.
assert req_to_install.satisfied_by is None
if not self.ignore_installed:
skip_reason = self._check_skip_installed(
req_to_install, finder)
if req_to_install.satisfied_by:
assert skip_reason is not None, (
'_check_skip_installed returned None but '
'req_to_install.satisfied_by is set to %r'
% (req_to_install.satisfied_by,))
logger.info(
'Requirement %s: %s', skip_reason,
req_to_install)
else:
if (req_to_install.link and
req_to_install.link.scheme == 'file'):
path = url_to_path(req_to_install.link.url)
logger.info('Processing %s', display_path(path))
else:
logger.info('Collecting %s', req_to_install)
with indent_log():
# ################################ #
# # vcs update or unpack archive # #
# ################################ #
if req_to_install.editable:
if require_hashes:
raise InstallationError(
'The editable requirement %s cannot be installed when '
'requiring hashes, because there is no single file to '
'hash.' % req_to_install)
req_to_install.ensure_has_source_dir(self.src_dir)
req_to_install.update_editable(not self.is_download)
abstract_dist = make_abstract_dist(req_to_install)
abstract_dist.prep_for_dist()
if self.is_download:
req_to_install.archive(self.download_dir)
req_to_install.check_if_exists()
elif req_to_install.satisfied_by:
if require_hashes:
logger.debug(
'Since it is already installed, we are trusting this '
'package without checking its hash. To ensure a '
'completely repeatable environment, install into an '
'empty virtualenv.')
abstract_dist = Installed(req_to_install)
else:
# @@ if filesystem packages are not marked
# editable in a req, a non deterministic error
# occurs when the script attempts to unpack the
# build directory
req_to_install.ensure_has_source_dir(self.build_dir)
                # If a checkout exists, it's unwise to keep going. Version
# inconsistencies are logged later, but do not fail the
# installation.
# FIXME: this won't upgrade when there's an existing
# package unpacked in `req_to_install.source_dir`
if os.path.exists(
os.path.join(req_to_install.source_dir, 'setup.py')):
raise PreviousBuildDirError(
"pip can't proceed with requirements '%s' due to a"
" pre-existing build directory (%s). This is "
"likely due to a previous installation that failed"
". pip is being responsible and not assuming it "
"can delete this. Please delete it and try again."
% (req_to_install, req_to_install.source_dir)
)
req_to_install.populate_link(
finder,
self._is_upgrade_allowed(req_to_install),
require_hashes
)
# We can't hit this spot and have populate_link return None.
# req_to_install.satisfied_by is None here (because we're
# guarded) and upgrade has no impact except when satisfied_by
# is not None.
# Then inside find_requirement existing_applicable -> False
# If no new versions are found, DistributionNotFound is raised,
# otherwise a result is guaranteed.
assert req_to_install.link
link = req_to_install.link
# Now that we have the real link, we can tell what kind of
# requirements we have and raise some more informative errors
# than otherwise. (For example, we can raise VcsHashUnsupported
# for a VCS URL rather than HashMissing.)
if require_hashes:
# We could check these first 2 conditions inside
# unpack_url and save repetition of conditions, but then
# we would report less-useful error messages for
# unhashable requirements, complaining that there's no
# hash provided.
if is_vcs_url(link):
raise VcsHashUnsupported()
elif is_file_url(link) and is_dir_url(link):
raise DirectoryUrlHashUnsupported()
if (not req_to_install.original_link and
not req_to_install.is_pinned):
# Unpinned packages are asking for trouble when a new
# version is uploaded. This isn't a security check, but
# it saves users a surprising hash mismatch in the
# future.
#
# file:/// URLs aren't pinnable, so don't complain
# about them not being pinned.
raise HashUnpinned()
hashes = req_to_install.hashes(
trust_internet=not require_hashes)
if require_hashes and not hashes:
# Known-good hashes are missing for this requirement, so
# shim it with a facade object that will provoke hash
# computation and then raise a HashMissing exception
# showing the user what the hash should be.
hashes = MissingHashes()
try:
download_dir = self.download_dir
                    # We always delete unpacked sdists after pip has run.
autodelete_unpacked = True
if req_to_install.link.is_wheel \
and self.wheel_download_dir:
                        # when doing `pip wheel` we download wheels to a
# dedicated dir.
download_dir = self.wheel_download_dir
if req_to_install.link.is_wheel:
if download_dir:
# When downloading, we only unpack wheels to get
# metadata.
autodelete_unpacked = True
else:
# When installing a wheel, we use the unpacked
# wheel.
autodelete_unpacked = False
unpack_url(
req_to_install.link, req_to_install.source_dir,
download_dir, autodelete_unpacked,
session=self.session, hashes=hashes)
except requests.HTTPError as exc:
logger.critical(
'Could not install requirement %s because '
'of error %s',
req_to_install,
exc,
)
raise InstallationError(
'Could not install requirement %s because '
'of HTTP error %s for URL %s' %
(req_to_install, exc, req_to_install.link)
)
abstract_dist = make_abstract_dist(req_to_install)
abstract_dist.prep_for_dist()
if self.is_download:
# Make a .zip of the source_dir we already created.
if req_to_install.link.scheme in vcs.all_schemes:
req_to_install.archive(self.download_dir)
                    # req_to_install.req is only available after unpack for URL
                    # packages; repeat check_if_exists to uninstall-on-upgrade
                    # (see #14)
if not self.ignore_installed:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade or self.ignore_installed:
# don't uninstall conflict if user install and
# conflict is not user install
if not (self.use_user_site and not
dist_in_usersite(
req_to_install.satisfied_by)):
req_to_install.conflicts_with = \
req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
logger.info(
'Requirement already satisfied (use '
'--upgrade to upgrade): %s',
req_to_install,
)
# ###################### #
# # parse dependencies # #
# ###################### #
dist = abstract_dist.dist(finder)
try:
check_dist_requires_python(dist)
except UnsupportedPythonVersion as e:
if self.ignore_requires_python:
logger.warning(e.args[0])
else:
req_to_install.remove_temporary_source()
raise
more_reqs = []
def add_req(subreq, extras_requested):
sub_install_req = InstallRequirement(
str(subreq),
req_to_install,
isolated=self.isolated,
wheel_cache=self._wheel_cache,
)
more_reqs.extend(self.add_requirement(
sub_install_req, req_to_install.name,
extras_requested=extras_requested))
# We add req_to_install before its dependencies, so that we
# can refer to it when adding dependencies.
if not self.has_requirement(req_to_install.name):
# 'unnamed' requirements will get added here
self.add_requirement(req_to_install, None)
if not ignore_dependencies:
if (req_to_install.extras):
logger.debug(
"Installing extra requirements: %r",
','.join(req_to_install.extras),
)
missing_requested = sorted(
set(req_to_install.extras) - set(dist.extras)
)
for missing in missing_requested:
logger.warning(
'%s does not provide the extra \'%s\'',
dist, missing
)
available_requested = sorted(
set(dist.extras) & set(req_to_install.extras)
)
for subreq in dist.requires(available_requested):
add_req(subreq, extras_requested=available_requested)
# cleanup tmp src
self.reqs_to_cleanup.append(req_to_install)
if not req_to_install.editable and not req_to_install.satisfied_by:
# XXX: --no-install leads this to report 'Successfully
# downloaded' for only non-editable reqs, even though we took
# action on them.
self.successfully_downloaded.append(req_to_install)
return more_reqs
def cleanup_files(self):
"""Clean up files, remove builds."""
logger.debug('Cleaning up...')
with indent_log():
for req in self.reqs_to_cleanup:
req.remove_temporary_source()
def _to_install(self):
"""Create the installation order.
The installation order is topological - requirements are installed
before the requiring thing. We break cycles at an arbitrary point,
and make no other guarantees.
"""
# The current implementation, which we may change at any point
# installs the user specified things in the order given, except when
# dependencies must come earlier to achieve topological order.
order = []
ordered_reqs = set()
def schedule(req):
if req.satisfied_by or req in ordered_reqs:
return
if req.constraint:
return
ordered_reqs.add(req)
for dep in self._dependencies[req]:
schedule(dep)
order.append(req)
for install_req in self.requirements.values():
schedule(install_req)
return order
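    # Illustrative note (added commentary, not in the original source): if A
    # depends on B and B depends on C, schedule(A) recurses into B and then C,
    # so _to_install() returns [C, B, A] -- dependencies before dependents.
    # The `req in ordered_reqs` guard is what breaks dependency cycles.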
def install(self, install_options, global_options=(), *args, **kwargs):
"""
Install everything in this set (after having downloaded and unpacked
the packages)
"""
to_install = self._to_install()
if to_install:
logger.info(
'Installing collected packages: %s',
', '.join([req.name for req in to_install]),
)
with indent_log():
for requirement in to_install:
if requirement.conflicts_with:
logger.info(
'Found existing installation: %s',
requirement.conflicts_with,
)
with indent_log():
requirement.uninstall(auto_confirm=True)
try:
requirement.install(
install_options,
global_options,
*args,
**kwargs
)
except:
# if install did not succeed, rollback previous uninstall
if (requirement.conflicts_with and not
requirement.install_succeeded):
requirement.rollback_uninstall()
raise
else:
if (requirement.conflicts_with and
requirement.install_succeeded):
requirement.commit_uninstall()
requirement.remove_temporary_source()
self.successfully_installed = to_install
|
jcpowermac/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/service.py
|
37
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) Ansible Inc, 2016
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import glob
import os
import pickle
import platform
import select
import shlex
import subprocess
import traceback
from ansible.module_utils.six import PY2, b
from ansible.module_utils._text import to_bytes, to_text
def sysv_is_enabled(name):
'''
This function will check if the service name supplied
is enabled in any of the sysv runlevels
:arg name: name of the service to test for
'''
return bool(glob.glob('/etc/rc?.d/S??%s' % name))
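# For example (added comment, illustrative only): a service named "nginx" that is
# enabled at runlevel 3 would typically have a start link like /etc/rc3.d/S20nginx,
# which the glob pattern above ('/etc/rc?.d/S??nginx') matches.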
def get_sysv_script(name):
'''
This function will return the expected path for an init script
corresponding to the service name supplied.
:arg name: name or path of the service to test for
'''
if name.startswith('/'):
result = name
else:
result = '/etc/init.d/%s' % name
return result
def sysv_exists(name):
'''
This function will return True or False depending on
the existence of an init script corresponding to the service name supplied.
:arg name: name of the service to test for
'''
return os.path.exists(get_sysv_script(name))
def fail_if_missing(module, found, service, msg=''):
'''
This function will return an error or exit gracefully depending on check mode status
and if the service is missing or not.
    :arg module: is an AnsibleModule object, used for its utility methods
    :arg found: boolean indicating if the service was found or not
:arg service: name of service
:kw msg: extra info to append to error/success msg when missing
'''
if not found:
if module.check_mode:
module.exit_json(msg="Service %s not found on %s, assuming it will exist on full run" % (service, msg), changed=True)
else:
module.fail_json(msg='Could not find the requested service %s: %s' % (service, msg))
def fork_process():
'''
This function performs the double fork process to detach from the
parent process and execute.
'''
pid = os.fork()
if pid == 0:
# Set stdin/stdout/stderr to /dev/null
fd = os.open(os.devnull, os.O_RDWR)
# clone stdin/out/err
for num in range(3):
if fd != num:
os.dup2(fd, num)
# close otherwise
if fd not in range(3):
os.close(fd)
# Make us a daemon
pid = os.fork()
# end if not in child
if pid > 0:
os._exit(0)
# get new process session and detach
sid = os.setsid()
if sid == -1:
raise Exception("Unable to detach session while daemonizing")
# avoid possible problems with cwd being removed
os.chdir("/")
pid = os.fork()
if pid > 0:
os._exit(0)
return pid
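# Added commentary (not part of the original file): fork_process() returns in two
# different processes -- the original caller gets back the pid (> 0) of the
# short-lived intermediate child, while the fully detached daemon descendant gets
# 0. Callers such as daemonize() below branch on that return value.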
def daemonize(module, cmd):
'''
Execute a command while detaching as a daemon, returns rc, stdout, and stderr.
    :arg module: is an AnsibleModule object, used for its utility methods
:arg cmd: is a list or string representing the command and options to run
This is complex because daemonization is hard for people.
What we do is daemonize a part of this module, the daemon runs the command,
picks up the return code and output, and returns it to the main process.
'''
# init some vars
chunk = 4096 # FIXME: pass in as arg?
errors = 'surrogate_or_strict'
# start it!
try:
pipe = os.pipe()
pid = fork_process()
    except OSError as e:
        module.fail_json(msg="Error while attempting to fork: %s" % to_text(e), exception=traceback.format_exc())
except Exception as exc:
module.fail_json(msg=to_text(exc), exception=traceback.format_exc())
# we don't do any locking as this should be a unique module/process
if pid == 0:
os.close(pipe[0])
# if command is string deal with py2 vs py3 conversions for shlex
if not isinstance(cmd, list):
if PY2:
cmd = shlex.split(to_bytes(cmd, errors=errors))
else:
cmd = shlex.split(to_text(cmd, errors=errors))
# make sure we always use byte strings
run_cmd = []
for c in cmd:
run_cmd.append(to_bytes(c, errors=errors))
# execute the command in forked process
p = subprocess.Popen(run_cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=lambda: os.close(pipe[1]))
fds = [p.stdout, p.stderr]
        # loop reading output till it's done
        output = {p.stdout: b(""), p.stderr: b("")}
while fds:
rfd, wfd, efd = select.select(fds, [], fds, 1)
if (rfd + wfd + efd) or p.poll():
for out in fds:
if out in rfd:
data = os.read(out.fileno(), chunk)
if not data:
fds.remove(out)
                        output[out] += to_bytes(data, errors=errors)
# even after fds close, we might want to wait for pid to die
p.wait()
# Return a pickled data of parent
return_data = pickle.dumps([p.returncode, to_text(output[p.stdout]), to_text(output[p.stderr])], protocol=pickle.HIGHEST_PROTOCOL)
os.write(pipe[1], to_bytes(return_data, errors=errors))
# clean up
os.close(pipe[1])
os._exit(0)
elif pid == -1:
module.fail_json(msg="Unable to fork, no exception thrown, probably due to lack of resources, check logs.")
else:
# in parent
os.close(pipe[1])
os.waitpid(pid, 0)
# Grab response data after child finishes
return_data = b("")
while True:
rfd, wfd, efd = select.select([pipe[0]], [], [pipe[0]])
if pipe[0] in rfd:
data = os.read(pipe[0], chunk)
if not data:
break
                return_data += to_bytes(data, errors=errors)
# Note: no need to specify encoding on py3 as this module sends the
# pickle to itself (thus same python interpreter so we aren't mixing
# py2 and py3)
return pickle.loads(to_bytes(return_data, errors=errors))
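# Hedged usage sketch (added for illustration; the argument_spec and command below
# are assumptions, not part of the original file):
#
#     from ansible.module_utils.basic import AnsibleModule
#     module = AnsibleModule(argument_spec=dict(name=dict(type='str')))
#     rc, out, err = daemonize(module, '/usr/sbin/service nginx restart')
#     module.exit_json(changed=(rc == 0), rc=rc, stdout=out, stderr=err)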
def check_ps(module, pattern):
# Set ps flags
if platform.system() == 'SunOS':
psflags = '-ef'
else:
psflags = 'auxww'
# Find ps binary
psbin = module.get_bin_path('ps', True)
(rc, out, err) = module.run_command('%s %s' % (psbin, psflags))
# If rc is 0, set running as appropriate
if rc == 0:
for line in out.split('\n'):
if pattern in line:
return True
return False
|
DailyActie/Surrogate-Model
|
refs/heads/master
|
01-codes/scipy-master/scipy/linalg/_solvers.py
|
1
|
"""Matrix equation solver routines"""
# Author: Jeffrey Armstrong <jeff@approximatrix.com>
# February 24, 2012
# Modified: Chad Fulton <ChadFulton@gmail.com>
# June 19, 2014
# Modified: Ilhan Polat <ilhanpolat@gmail.com>
# September 13, 2016
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.linalg import inv, LinAlgError, norm, cond, svd
from ._decomp_qz import ordqz
from .basic import solve, solve_triangular, matrix_balance
from .decomp import _asarray_validated
from .decomp_lu import lu
from .decomp_qr import qr
from .decomp_schur import schur
from .lapack import get_lapack_funcs
from .special_matrices import kron, block_diag
__all__ = ['solve_sylvester', 'solve_lyapunov', 'solve_discrete_lyapunov',
'solve_continuous_are', 'solve_discrete_are']
def solve_sylvester(a, b, q):
"""
Computes a solution (X) to the Sylvester equation :math:`AX + XB = Q`.
Parameters
----------
a : (M, M) array_like
Leading matrix of the Sylvester equation
b : (N, N) array_like
Trailing matrix of the Sylvester equation
q : (M, N) array_like
Right-hand side
Returns
-------
x : (M, N) ndarray
The solution to the Sylvester equation.
Raises
------
LinAlgError
If solution was not found
Notes
-----
Computes a solution to the Sylvester matrix equation via the Bartels-
Stewart algorithm. The A and B matrices first undergo Schur
decompositions. The resulting matrices are used to construct an
alternative Sylvester equation (``RY + YS^T = F``) where the R and S
matrices are in quasi-triangular form (or, when R, S or F are complex,
triangular form). The simplified equation is then solved using
``*TRSYL`` from LAPACK directly.
.. versionadded:: 0.11.0
"""
# Compute the Schur decomp form of a
r, u = schur(a, output='real')
# Compute the Schur decomp of b
s, v = schur(b.conj().transpose(), output='real')
# Construct f = u'*q*v
f = np.dot(np.dot(u.conj().transpose(), q), v)
# Call the Sylvester equation solver
trsyl, = get_lapack_funcs(('trsyl',), (r, s, f))
if trsyl is None:
raise RuntimeError('LAPACK implementation does not contain a proper '
'Sylvester equation solver (TRSYL)')
y, scale, info = trsyl(r, s, f, tranb='C')
y = scale * y
if info < 0:
raise LinAlgError("Illegal value encountered in "
"the %d term" % (-info,))
return np.dot(np.dot(u, y), v.conj().transpose())
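# Illustrative check (added comment only; values chosen for demonstration): the
# returned X should satisfy A X + X B = Q up to rounding error, e.g.
#
#     a = np.array([[-3., -2.], [-1., -1.]])
#     b = np.array([[2.]])
#     q = np.array([[1.], [2.]])
#     x = solve_sylvester(a, b, q)
#     assert np.allclose(a.dot(x) + x.dot(b), q)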
def solve_lyapunov(a, q):
"""
Solves the continuous Lyapunov equation :math:`AX + XA^H = Q`.
Uses the Bartels-Stewart algorithm to find :math:`X`.
Parameters
----------
a : array_like
A square matrix
q : array_like
Right-hand side square matrix
Returns
-------
x : array_like
Solution to the continuous Lyapunov equation
See Also
--------
solve_sylvester : computes the solution to the Sylvester equation
Notes
-----
Because the continuous Lyapunov equation is just a special form of the
Sylvester equation, this solver relies entirely on solve_sylvester for a
solution.
.. versionadded:: 0.11.0
"""
return solve_sylvester(a, a.conj().transpose(), q)
def _solve_discrete_lyapunov_direct(a, q):
"""
Solves the discrete Lyapunov equation directly.
This function is called by the `solve_discrete_lyapunov` function with
`method=direct`. It is not supposed to be called directly.
"""
lhs = kron(a, a.conj())
lhs = np.eye(lhs.shape[0]) - lhs
x = solve(lhs, q.flatten())
return np.reshape(x, q.shape)
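# Added commentary: the direct method rewrites A X A^H - X + Q = 0 as a linear
# system in vec(X). With the row-major flattening used by q.flatten() above,
# vec(A X A^H) = (A kron conj(A)) vec(X), so the system solved is
# (I - A kron conj(A)) vec(X) = vec(Q).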
def _solve_discrete_lyapunov_bilinear(a, q):
"""
Solves the discrete Lyapunov equation using a bilinear transformation.
This function is called by the `solve_discrete_lyapunov` function with
`method=bilinear`. It is not supposed to be called directly.
"""
eye = np.eye(a.shape[0])
aH = a.conj().transpose()
aHI_inv = inv(aH + eye)
b = np.dot(aH - eye, aHI_inv)
c = 2 * np.dot(np.dot(inv(a + eye), q), aHI_inv)
return solve_lyapunov(b.conj().transpose(), -c)
def solve_discrete_lyapunov(a, q, method=None):
"""
Solves the discrete Lyapunov equation :math:`AXA^H - X + Q = 0`.
Parameters
----------
a, q : (M, M) array_like
Square matrices corresponding to A and Q in the equation
above respectively. Must have the same shape.
method : {'direct', 'bilinear'}, optional
Type of solver.
If not given, chosen to be ``direct`` if ``M`` is less than 10 and
``bilinear`` otherwise.
Returns
-------
x : ndarray
Solution to the discrete Lyapunov equation
See Also
--------
solve_lyapunov : computes the solution to the continuous Lyapunov equation
Notes
-----
This section describes the available solvers that can be selected by the
'method' parameter. The default method is *direct* if ``M`` is less than 10
and ``bilinear`` otherwise.
Method *direct* uses a direct analytical solution to the discrete Lyapunov
equation. The algorithm is given in, for example, [1]_. However it requires
the linear solution of a system with dimension :math:`M^2` so that
performance degrades rapidly for even moderately sized matrices.
Method *bilinear* uses a bilinear transformation to convert the discrete
Lyapunov equation to a continuous Lyapunov equation :math:`(BX+XB'=-C)`
where :math:`B=(A-I)(A+I)^{-1}` and
:math:`C=2(A' + I)^{-1} Q (A + I)^{-1}`. The continuous equation can be
efficiently solved since it is a special case of a Sylvester equation.
The transformation algorithm is from Popov (1964) as described in [2]_.
.. versionadded:: 0.11.0
References
----------
.. [1] Hamilton, James D. Time Series Analysis, Princeton: Princeton
University Press, 1994. 265. Print.
http://www.scribd.com/doc/20577138/Hamilton-1994-Time-Series-Analysis
.. [2] Gajic, Z., and M.T.J. Qureshi. 2008.
Lyapunov Matrix Equation in System Stability and Control.
Dover Books on Engineering Series. Dover Publications.
"""
a = np.asarray(a)
q = np.asarray(q)
if method is None:
# Select automatically based on size of matrices
if a.shape[0] >= 10:
method = 'bilinear'
else:
method = 'direct'
meth = method.lower()
if meth == 'direct':
x = _solve_discrete_lyapunov_direct(a, q)
elif meth == 'bilinear':
x = _solve_discrete_lyapunov_bilinear(a, q)
else:
raise ValueError('Unknown solver %s' % method)
return x
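# Illustrative usage sketch (added comment only; values chosen for demonstration):
#
#     a = np.array([[0.2, 0.5], [0.7, -0.2]])
#     q = np.eye(2)
#     x = solve_discrete_lyapunov(a, q)
#     # the residual A X A^H - X + Q should be (numerically) zero:
#     assert np.allclose(a.dot(x).dot(a.conj().T) - x + q, 0)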
def solve_continuous_are(a, b, q, r, e=None, s=None, balanced=True):
"""
Solves the continuous-time algebraic Riccati equation (CARE).
The CARE is defined as
.. math::
X A + A^H X - X B R^{-1} B^H X + Q = 0
The limitations for a solution to exist are :
    * All eigenvalues of :math:`A` on the right half plane should be
controllable.
* The associated hamiltonian pencil (See Notes), should have
eigenvalues sufficiently away from the imaginary axis.
Moreover, if ``e`` or ``s`` is not precisely ``None``, then the
generalized version of CARE
.. math::
E^HXA + A^HXE - (E^HXB + S) R^{-1} (B^HXE + S^H) + Q = 0
is solved. When omitted, ``e`` is assumed to be the identity and ``s``
is assumed to be the zero matrix with sizes compatible with ``a`` and
``b`` respectively.
Parameters
----------
a : (M, M) array_like
Square matrix
b : (M, N) array_like
Input
q : (M, M) array_like
Input
r : (N, N) array_like
Nonsingular square matrix
e : (M, M) array_like, optional
Nonsingular square matrix
s : (M, N) array_like, optional
Input
balanced : bool, optional
The boolean that indicates whether a balancing step is performed
on the data. The default is set to True.
Returns
-------
x : (M, M) ndarray
Solution to the continuous-time algebraic Riccati equation.
Raises
------
LinAlgError
For cases where the stable subspace of the pencil could not be
isolated. See Notes section and the references for details.
See Also
--------
solve_discrete_are : Solves the discrete-time algebraic Riccati equation
Notes
-----
The equation is solved by forming the extended hamiltonian matrix pencil,
as described in [1]_, :math:`H - \lambda J` given by the block matrices ::
[ A 0 B ] [ E 0 0 ]
[-Q -A^H -S ] - \lambda * [ 0 E^H 0 ]
[ S^H B^H R ] [ 0 0 0 ]
and using a QZ decomposition method.
In this algorithm, the fail conditions are linked to the symmetry
of the product :math:`U_2 U_1^{-1}` and condition number of
:math:`U_1`. Here, :math:`U` is the 2m-by-m matrix that holds the
eigenvectors spanning the stable subspace with 2m rows and partitioned
into two m-row matrices. See [1]_ and [2]_ for more details.
In order to improve the QZ decomposition accuracy, the pencil goes
through a balancing step where the sum of absolute values of
:math:`H` and :math:`J` entries (after removing the diagonal entries of
the sum) is balanced following the recipe given in [3]_.
.. versionadded:: 0.11.0
References
----------
.. [1] P. van Dooren , "A Generalized Eigenvalue Approach For Solving
Riccati Equations.", SIAM Journal on Scientific and Statistical
Computing, Vol.2(2), DOI: 10.1137/0902010
.. [2] A.J. Laub, "A Schur Method for Solving Algebraic Riccati
Equations.", Massachusetts Institute of Technology. Laboratory for
Information and Decision Systems. LIDS-R ; 859. Available online :
http://hdl.handle.net/1721.1/1301
.. [3] P. Benner, "Symplectic Balancing of Hamiltonian Matrices", 2001,
SIAM J. Sci. Comput., 2001, Vol.22(5), DOI: 10.1137/S1064827500367993
"""
# Validate input arguments
a, b, q, r, e, s, m, n, r_or_c, gen_are = _are_validate_args(
a, b, q, r, e, s, 'care')
H = np.empty((2 * m + n, 2 * m + n), dtype=r_or_c)
H[:m, :m] = a
H[:m, m:2 * m] = 0.
H[:m, 2 * m:] = b
H[m:2 * m, :m] = -q
H[m:2 * m, m:2 * m] = -a.conj().T
H[m:2 * m, 2 * m:] = 0. if s is None else -s
H[2 * m:, :m] = 0. if s is None else s.conj().T
H[2 * m:, m:2 * m] = b.conj().T
H[2 * m:, 2 * m:] = r
if gen_are:
J = block_diag(e, e.conj().T, np.zeros_like(r, dtype=r_or_c))
else:
J = block_diag(np.eye(2 * m), np.zeros_like(r, dtype=r_or_c))
if balanced:
# xGEBAL does not remove the diagonals before scaling. Also
# to avoid destroying the Symplectic structure, we follow Ref.3
M = np.abs(H) + np.abs(J)
M[np.diag_indices_from(M)] = 0.
_, (sca, _) = matrix_balance(M, separate=1, permute=0)
# do we need to bother?
if not np.allclose(sca, np.ones_like(sca)):
# Now impose diag(D,inv(D)) from Benner where D is
# square root of s_i/s_(n+i) for i=0,....
sca = np.log2(sca)
# NOTE: Py3 uses "Bankers Rounding: round to the nearest even" !!
s = np.round((sca[m:2 * m] - sca[:m]) / 2)
sca = 2 ** np.r_[s, -s, sca[2 * m:]]
# Elementwise multiplication via broadcasting.
elwisescale = sca[:, None] * np.reciprocal(sca)
H *= elwisescale
J *= elwisescale
# Deflate the pencil to 2m x 2m ala Ref.1, eq.(55)
q, r = qr(H[:, -n:])
H = q[:, n:].conj().T.dot(H[:, :2 * m])
J = q[:2 * m, n:].conj().T.dot(J[:2 * m, :2 * m])
# Decide on which output type is needed for QZ
out_str = 'real' if r_or_c == float else 'complex'
_, _, _, _, _, u = ordqz(H, J, sort='lhp', overwrite_a=True,
overwrite_b=True, check_finite=False,
output=out_str)
# Get the relevant parts of the stable subspace basis
if gen_are:
u, _ = qr(np.vstack((e.dot(u[:m, :m]), u[m:, :m])))
u00 = u[:m, :m]
u10 = u[m:, :m]
    # Solve via back-substitution after checking the condition of u00
up, ul, uu = lu(u00)
if 1 / cond(uu) < np.spacing(1.):
raise LinAlgError('Failed to find a finite solution.')
# Exploit the triangular structure
x = solve_triangular(ul.conj().T,
solve_triangular(uu.conj().T,
u10.conj().T,
lower=True),
unit_diagonal=True,
).conj().T.dot(up.conj().T)
if balanced:
x *= sca[:m, None] * sca[:m]
# Check the deviation from symmetry for success
u_sym = u00.conj().T.dot(u10)
n_u_sym = norm(u_sym, 1)
u_sym = u_sym - u_sym.conj().T
sym_threshold = np.max([np.spacing(1000.), n_u_sym])
if norm(u_sym, 1) > sym_threshold:
raise LinAlgError('The associated Hamiltonian pencil has eigenvalues '
'too close to the imaginary axis')
return (x + x.conj().T) / 2
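# Added note (illustration only): a common sanity check on the returned solution
# is the CARE residual, which should be small relative to norm(x), e.g.
#
#     res = (x.dot(a) + a.conj().T.dot(x)
#            - x.dot(b).dot(np.linalg.solve(r, b.conj().T.dot(x))) + q)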
def solve_discrete_are(a, b, q, r, e=None, s=None, balanced=True):
"""
Solves the discrete-time algebraic Riccati equation (DARE).
The DARE is defined as
.. math::
A^HXA - X - (A^HXB) (R + B^HXB)^{-1} (B^HXA) + Q = 0
The limitations for a solution to exist are :
    * All eigenvalues of :math:`A` outside the unit disc should be
controllable.
* The associated symplectic pencil (See Notes), should have
eigenvalues sufficiently away from the unit circle.
Moreover, if ``e`` and ``s`` are not both precisely ``None``, then the
generalized version of DARE
.. math::
A^HXA - E^HXE - (A^HXB+S) (R+B^HXB)^{-1} (B^HXA+S^H) + Q = 0
is solved. When omitted, ``e`` is assumed to be the identity and ``s``
is assumed to be the zero matrix.
Parameters
----------
a : (M, M) array_like
Square matrix
b : (M, N) array_like
Input
q : (M, M) array_like
Input
r : (N, N) array_like
Square matrix
e : (M, M) array_like, optional
Nonsingular square matrix
s : (M, N) array_like, optional
Input
balanced : bool
The boolean that indicates whether a balancing step is performed
on the data. The default is set to True.
Returns
-------
x : (M, M) ndarray
Solution to the discrete algebraic Riccati equation.
Raises
------
LinAlgError
For cases where the stable subspace of the pencil could not be
isolated. See Notes section and the references for details.
See Also
--------
solve_continuous_are : Solves the continuous algebraic Riccati equation
Notes
-----
The equation is solved by forming the extended symplectic matrix pencil,
as described in [1]_, :math:`H - \lambda J` given by the block matrices ::
[ A 0 B ] [ E 0 B ]
[ -Q E^H -S ] - \lambda * [ 0 A^H 0 ]
[ S^H 0 R ] [ 0 -B^H 0 ]
and using a QZ decomposition method.
In this algorithm, the fail conditions are linked to the symmetry
of the product :math:`U_2 U_1^{-1}` and condition number of
:math:`U_1`. Here, :math:`U` is the 2m-by-m matrix that holds the
eigenvectors spanning the stable subspace with 2m rows and partitioned
into two m-row matrices. See [1]_ and [2]_ for more details.
In order to improve the QZ decomposition accuracy, the pencil goes
through a balancing step where the sum of absolute values of
:math:`H` and :math:`J` rows/cols (after removing the diagonal entries)
is balanced following the recipe given in [3]_. If the data has small
numerical noise, balancing may amplify their effects and some clean up
is required.
.. versionadded:: 0.11.0
References
----------
.. [1] P. van Dooren , "A Generalized Eigenvalue Approach For Solving
Riccati Equations.", SIAM Journal on Scientific and Statistical
Computing, Vol.2(2), DOI: 10.1137/0902010
.. [2] A.J. Laub, "A Schur Method for Solving Algebraic Riccati
Equations.", Massachusetts Institute of Technology. Laboratory for
Information and Decision Systems. LIDS-R ; 859. Available online :
http://hdl.handle.net/1721.1/1301
.. [3] P. Benner, "Symplectic Balancing of Hamiltonian Matrices", 2001,
SIAM J. Sci. Comput., 2001, Vol.22(5), DOI: 10.1137/S1064827500367993
"""
# Validate input arguments
a, b, q, r, e, s, m, n, r_or_c, gen_are = _are_validate_args(
a, b, q, r, e, s, 'dare')
# Form the matrix pencil
H = np.zeros((2 * m + n, 2 * m + n), dtype=r_or_c)
H[:m, :m] = a
H[:m, 2 * m:] = b
H[m:2 * m, :m] = -q
H[m:2 * m, m:2 * m] = np.eye(m) if e is None else e.conj().T
H[m:2 * m, 2 * m:] = 0. if s is None else -s
H[2 * m:, :m] = 0. if s is None else s.conj().T
H[2 * m:, 2 * m:] = r
J = np.zeros_like(H, dtype=r_or_c)
J[:m, :m] = np.eye(m) if e is None else e
J[m:2 * m, m:2 * m] = a.conj().T
J[2 * m:, m:2 * m] = -b.conj().T
if balanced:
# xGEBAL does not remove the diagonals before scaling. Also
# to avoid destroying the Symplectic structure, we follow Ref.3
M = np.abs(H) + np.abs(J)
M[np.diag_indices_from(M)] = 0.
_, (sca, _) = matrix_balance(M, separate=1, permute=0)
# do we need to bother?
if not np.allclose(sca, np.ones_like(sca)):
# Now impose diag(D,inv(D)) from Benner where D is
# square root of s_i/s_(n+i) for i=0,....
sca = np.log2(sca)
# NOTE: Py3 uses "Bankers Rounding: round to the nearest even" !!
s = np.round((sca[m:2 * m] - sca[:m]) / 2)
sca = 2 ** np.r_[s, -s, sca[2 * m:]]
# Elementwise multiplication via broadcasting.
elwisescale = sca[:, None] * np.reciprocal(sca)
H *= elwisescale
J *= elwisescale
# Deflate the pencil by the R column ala Ref.1
q_of_qr, _ = qr(H[:, -n:])
H = q_of_qr[:, n:].conj().T.dot(H[:, :2 * m])
J = q_of_qr[:, n:].conj().T.dot(J[:, :2 * m])
# Decide on which output type is needed for QZ
out_str = 'real' if r_or_c == float else 'complex'
_, _, _, _, _, u = ordqz(H, J, sort='iuc',
overwrite_a=True,
overwrite_b=True,
check_finite=False,
output=out_str)
# Get the relevant parts of the stable subspace basis
if gen_are:
u, _ = qr(np.vstack((e.dot(u[:m, :m]), u[m:, :m])))
u00 = u[:m, :m]
u10 = u[m:, :m]
    # Solve via back-substitution after checking the condition of u00
up, ul, uu = lu(u00)
if 1 / cond(uu) < np.spacing(1.):
raise LinAlgError('Failed to find a finite solution.')
# Exploit the triangular structure
x = solve_triangular(ul.conj().T,
solve_triangular(uu.conj().T,
u10.conj().T,
lower=True),
unit_diagonal=True,
).conj().T.dot(up.conj().T)
if balanced:
x *= sca[:m, None] * sca[:m]
# Check the deviation from symmetry for success
u_sym = u00.conj().T.dot(u10)
n_u_sym = norm(u_sym, 1)
u_sym = u_sym - u_sym.conj().T
sym_threshold = np.max([np.spacing(1000.), n_u_sym])
if norm(u_sym, 1) > sym_threshold:
        raise LinAlgError('The associated symplectic pencil has eigenvalues '
                          'too close to the unit circle')
return (x + x.conj().T) / 2
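# Added note (illustration only): the corresponding DARE residual for a candidate
# solution x is
#
#     res = (a.conj().T.dot(x).dot(a) - x + q
#            - a.conj().T.dot(x).dot(b).dot(
#                np.linalg.solve(r + b.conj().T.dot(x).dot(b),
#                                b.conj().T.dot(x).dot(a))))
#
# which should be close to the zero matrix after a successful solve.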
def _are_validate_args(a, b, q, r, e, s, eq_type='care'):
"""
A helper function to validate the arguments supplied to the
Riccati equation solvers. Any discrepancy found in the input
matrices leads to a ``ValueError`` exception.
Essentially, it performs:
- a check whether the input is free of NaN and Infs.
- a pass for the data through ``numpy.atleast_2d()``
- squareness check of the relevant arrays,
- shape consistency check of the arrays,
- singularity check of the relevant arrays,
    - symmetry check of the relevant matrices,
- a check whether the regular or the generalized version is asked.
This function is used by ``solve_continuous_are`` and
``solve_discrete_are``.
Parameters
----------
a, b, q, r, e, s : array_like
Input data
eq_type : str
Accepted arguments are 'care' and 'dare'.
Returns
-------
a, b, q, r, e, s : ndarray
Regularized input data
m, n : int
shape of the problem
r_or_c : type
Data type of the problem, returns float or complex
gen_or_not : bool
Type of the equation, True for generalized and False for regular ARE.
"""
    if eq_type.lower() not in ('dare', 'care'):
        raise ValueError("Equation type unknown. "
                         "Only 'care' and 'dare' are understood")
a = np.atleast_2d(_asarray_validated(a, check_finite=True))
b = np.atleast_2d(_asarray_validated(b, check_finite=True))
q = np.atleast_2d(_asarray_validated(q, check_finite=True))
r = np.atleast_2d(_asarray_validated(r, check_finite=True))
# Get the correct data types otherwise Numpy complains
# about pushing complex numbers into real arrays.
r_or_c = complex if np.iscomplexobj(b) else float
for ind, mat in enumerate((a, q, r)):
if np.iscomplexobj(mat):
r_or_c = complex
if not np.equal(*mat.shape):
raise ValueError("Matrix {} should be square.".format("aqr"[ind]))
# Shape consistency checks
m, n = b.shape
if m != a.shape[0]:
raise ValueError("Matrix a and b should have the same number of rows.")
if m != q.shape[0]:
raise ValueError("Matrix a and q should have the same shape.")
if n != r.shape[0]:
raise ValueError("Matrix b and r should have the same number of cols.")
# Check if the data matrices q, r are (sufficiently) hermitian
for ind, mat in enumerate((q, r)):
if norm(mat - mat.conj().T, 1) > np.spacing(norm(mat, 1)) * 100:
raise ValueError("Matrix {} should be symmetric/hermitian."
"".format("qr"[ind]))
# Continuous time ARE should have a nonsingular r matrix.
if eq_type == 'care':
min_sv = svd(r, compute_uv=False)[-1]
if min_sv == 0. or min_sv < np.spacing(1.) * norm(r, 1):
raise ValueError('Matrix r is numerically singular.')
# Check if the generalized case is required with omitted arguments
# perform late shape checking etc.
generalized_case = e is not None or s is not None
if generalized_case:
if e is not None:
e = np.atleast_2d(_asarray_validated(e, check_finite=True))
if not np.equal(*e.shape):
raise ValueError("Matrix e should be square.")
if m != e.shape[0]:
raise ValueError("Matrix a and e should have the same shape.")
# numpy.linalg.cond doesn't check for exact zeros and
# emits a runtime warning. Hence the following manual check.
min_sv = svd(e, compute_uv=False)[-1]
if min_sv == 0. or min_sv < np.spacing(1.) * norm(e, 1):
raise ValueError('Matrix e is numerically singular.')
if np.iscomplexobj(e):
r_or_c = complex
if s is not None:
s = np.atleast_2d(_asarray_validated(s, check_finite=True))
if s.shape != b.shape:
raise ValueError("Matrix b and s should have the same shape.")
if np.iscomplexobj(s):
r_or_c = complex
return a, b, q, r, e, s, m, n, r_or_c, generalized_case
|
lucafavatella/intellij-community
|
refs/heads/cli-wip
|
python/testData/refactoring/extractsuperclass/importMultiFile/source_module.after.py
|
320
|
from dest_module import NewParent
class MyClass(NewParent):
pass
|
rlutes/volttron-applications
|
refs/heads/master
|
kisensum/openadr/openadr/features/steps/drevent.py
|
2
|
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2017, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
from behave import given, when, then
from vtn.tests.factories import *
import time
from selenium.webdriver.support.ui import Select
from vtn.models import *
# TODO: Add test cases for editing DR event in OVERVIEW page
# TODO: Edit a DR event by removing one or more sites from, or adding more sites to, its list of sites, and test it.
@when('I add a DR Event with DR program "{dr_program_name}", customer "{name}", site "{site_name}", noti date "{noti_date}", noti time "{noti_time}", start date "{start_date}", start time "{start_time}", end date "{end_date}", end time "{end_time}"')
def step_impl(context, dr_program_name, name, site_name, noti_date, noti_time, start_date, start_time, end_date, end_time):
br = context.browser
    assert br.current_url.endswith('/vtn/home/')
br.find_element_by_link_text('Add DR Event').click()
    assert br.current_url.endswith('/vtn/dr_event/')
print(br.find_element_by_name("sites").get_attribute('value'))
br.find_element_by_name("dr_program").send_keys(dr_program_name)
    time.sleep(5)
br.find_element_by_name("sites").send_keys(site_name)
# Clear existing values
br.find_element_by_name("scheduled_notification_time_0").clear()
br.find_element_by_name("scheduled_notification_time_1").clear()
br.find_element_by_name("start_0").clear()
br.find_element_by_name("start_1").clear()
br.find_element_by_name("end_0").clear()
br.find_element_by_name("end_1").clear()
br.find_element_by_name("scheduled_notification_time_0").send_keys(noti_date)
br.find_element_by_name("scheduled_notification_time_1").send_keys(noti_time)
br.find_element_by_name("start_0").send_keys(start_date)
br.find_element_by_name("start_1").send_keys(start_time)
br.find_element_by_name("end_0").send_keys(end_date)
br.find_element_by_name("end_1").send_keys(end_time)
br.find_element_by_name("save").click()
context.execute_steps('''then I am redirected to the home page''')
@when('I add a DR Event with DR program "{dr_program_name}", customer "{name}", with all sites, noti date "{noti_date}", noti time "{noti_time}", start date "{start_date}", start time "{start_time}", end date "{end_date}", end time "{end_time}"')
def step_impl(context, dr_program_name, name, noti_date, noti_time, start_date, start_time, end_date, end_time):
br = context.browser
    assert br.current_url.endswith('/vtn/home/')
br.find_element_by_link_text('Add DR Event').click()
    assert br.current_url.endswith('/vtn/dr_event/')
print(br.find_element_by_name("sites").get_attribute('value'))
br.find_element_by_name("dr_program").send_keys(dr_program_name)
    time.sleep(5)
# Clear existing values
br.find_element_by_name("scheduled_notification_time_0").clear()
br.find_element_by_name("scheduled_notification_time_1").clear()
br.find_element_by_name("start_0").clear()
br.find_element_by_name("start_1").clear()
br.find_element_by_name("end_0").clear()
br.find_element_by_name("end_1").clear()
br.find_element_by_name("scheduled_notification_time_0").send_keys(noti_date)
br.find_element_by_name("scheduled_notification_time_1").send_keys(noti_time)
br.find_element_by_name("start_0").send_keys(start_date)
br.find_element_by_name("start_1").send_keys(start_time)
br.find_element_by_name("end_0").send_keys(end_date)
br.find_element_by_name("end_1").send_keys(end_time)
br.find_element_by_name("save").click()
# how to figure out which DR Event was just created?
print("There are {} DR Events".format(str(DREvent.objects.filter(dr_program__name="dr_program_test").count())))
context.execute_steps('''then I am redirected to the home page''')
@when('I cancel a DR Event with DR program "{dr_program_name}"')
def step_impl(context, dr_program_name):
br = context.browser
    assert br.current_url.endswith('/vtn/home/')
br.find_element_by_link_text(dr_program_name).click()
    assert '/vtn/dr_event/edit/' in br.current_url
br.find_element_by_id("cancel_event").click()
br.find_element_by_link_text("Cancel").click()
    assert br.current_url.endswith('/vtn/home/')
@when('I edit DR Event "{dr_program_name}" by deselecting site "{site_name}"')
def step_impl(context, dr_program_name, site_name):
br = context.browser
br.find_element_by_link_text("Overview").click()
assert br.current_url.endswith("vtn/home/") != -1
br.find_element_by_link_text(dr_program_name).click()
select = Select(br.find_element_by_name('sites'))
select.deselect_by_visible_text(site_name)
print(site_name)
# all_selected_options = select.all_selected_options
# selected_texts = [option.text for option in all_selected_options]
# select.deselect_all()
# for text in selected_texts:
# if text != site_name:
# select.select_by_visible_text(text)
br.find_element_by_name("Save").click()
@when('I edit DR Event "{dr_program_name}" by adding site "{site_name}"')
def step_impl(context, dr_program_name, site_name):
br = context.browser
br.find_element_by_link_text("Overview").click()
assert br.current_url.endswith("vtn/home/") != -1
br.find_element_by_link_text(dr_program_name).click()
select = Select(br.find_element_by_name('sites'))
select.select_by_visible_text(site_name)
br.find_element_by_name("Save").click()
@then('I should see no DR event with DR program "{dr_program_name}"')
def step_impl(context, dr_program_name):
br = context.browser
br.find_element_by_link_text("Overview").click()
assert br.current_url.endswith("/vtn/home/") != -1
lst = br.find_elements_by_xpath("//*[@id='eventTable']//tbody//td")
found = False
for i in range(int(len(lst) / 6)):
if lst[i * 6].text == dr_program_name:
found = True
assert lst[i * 6 + 5].text == "Cancelled"
break
    assert found
br.find_element_by_link_text("Admin").click()
br.find_element_by_link_text("DR Events").click()
br.find_element_by_link_text(dr_program_name).click()
assert br.find_element_by_name("deleted").is_selected() == True
@then('I should see a DR event of DR program "{dr_program_name}", site "{site_name}", noti date "{noti_date}", noti time "{noti_time}"')
def step_impl(context, dr_program_name, site_name, noti_date, noti_time):
br = context.browser
    assert br.current_url.endswith('/vtn/home/')
br.find_element_by_link_text(dr_program_name).click()
    assert '/vtn/dr_event/edit/' in br.current_url
assert br.find_element_by_name("dr_program").get_attribute('value') == "1"
assert br.find_element_by_name("scheduled_notification_time_0").get_attribute('value') == noti_date
assert br.find_element_by_name("scheduled_notification_time_1").get_attribute('value') == noti_time
|
anhstudios/swganh
|
refs/heads/develop
|
data/scripts/templates/object/static/space/debris/shared_droid_fighter_debris_s02.py
|
2
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/space/debris/shared_droid_fighter_debris_s02.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
MFoster/breeze
|
refs/heads/master
|
tests/regressiontests/urlpatterns_reverse/views.py
|
51
|
from django.http import HttpResponse
from django.views.generic import RedirectView
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.decorators import user_passes_test
def empty_view(request, *args, **kwargs):
return HttpResponse('')
def kwargs_view(request, arg1=1, arg2=2):
return HttpResponse('')
def absolute_kwargs_view(request, arg1=1, arg2=2):
return HttpResponse('')
def defaults_view(request, arg1, arg2):
pass
def erroneous_view(request):
import non_existent
def pass_resolver_match_view(request, *args, **kwargs):
response = HttpResponse('')
response.resolver_match = request.resolver_match
return response
uncallable = "Can I be a view? Pleeeease?"
class ViewClass(object):
def __call__(self, request, *args, **kwargs):
return HttpResponse('')
view_class_instance = ViewClass()
class LazyRedirectView(RedirectView):
url = reverse_lazy('named-lazy-url-redirected-to')
@user_passes_test(lambda u: u.is_authenticated(), login_url=reverse_lazy('some-login-page'))
def login_required_view(request):
return HttpResponse('Hello you')
def bad_view(request, *args, **kwargs):
raise ValueError("I don't think I'm getting good value for this view")
|
mvidalgarcia/indico
|
refs/heads/master
|
indico/modules/events/contributions/models/persons.py
|
2
|
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from indico.core.db.sqlalchemy import PyIntEnum, db
from indico.modules.events.models.persons import PersonLinkBase
from indico.util.locators import locator_property
from indico.util.string import format_repr, return_ascii
from indico.util.struct.enum import IndicoEnum
class AuthorType(int, IndicoEnum):
none = 0
primary = 1
secondary = 2
@classmethod
def get_highest(cls, *types):
if any(t == cls.primary for t in types):
return cls.primary
elif any(t == cls.secondary for t in types):
return cls.secondary
else:
return cls.none
class ContributionPersonLink(PersonLinkBase):
"""Association between EventPerson and Contribution."""
__tablename__ = 'contribution_person_links'
__auto_table_args = {'schema': 'events'}
person_link_backref_name = 'contribution_links'
person_link_unique_columns = ('contribution_id',)
object_relationship_name = 'contribution'
contribution_id = db.Column(
db.Integer,
db.ForeignKey('events.contributions.id'),
index=True,
nullable=False
)
is_speaker = db.Column(
db.Boolean,
nullable=False,
default=False
)
author_type = db.Column(
PyIntEnum(AuthorType),
nullable=False,
default=AuthorType.none
)
# relationship backrefs:
# - contribution (Contribution.person_links)
@property
def is_submitter(self):
if not self.contribution:
raise Exception("No contribution to check submission rights against")
return self.person.has_role('submit', self.contribution)
@property
def is_author(self):
return self.author_type != AuthorType.none
@locator_property
def locator(self):
return dict(self.contribution.locator, person_id=self.id)
@return_ascii
def __repr__(self):
return format_repr(self, 'id', 'person_id', 'contribution_id', is_speaker=False, author_type=AuthorType.none,
_text=self.full_name)
class SubContributionPersonLink(PersonLinkBase):
"""Association between EventPerson and SubContribution."""
__tablename__ = 'subcontribution_person_links'
__auto_table_args = {'schema': 'events'}
person_link_backref_name = 'subcontribution_links'
person_link_unique_columns = ('subcontribution_id',)
object_relationship_name = 'subcontribution'
# subcontribution persons are always speakers and never authors
# we provide these attributes to make subcontribution links
# compatible with contribution links
is_speaker = True
author_type = AuthorType.none
subcontribution_id = db.Column(
db.Integer,
db.ForeignKey('events.subcontributions.id'),
index=True,
nullable=False
)
# relationship backrefs:
# - subcontribution (SubContribution.person_links)
@return_ascii
def __repr__(self):
return format_repr(self, 'id', 'person_id', 'subcontribution_id', _text=self.full_name)
|
savoirfairelinux/OpenUpgrade
|
refs/heads/master
|
addons/base_report_designer/plugin/openerp_report_designer/bin/script/lib/error.py
|
382
|
##########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
##############################################################################
if __name__ != "package":
from gui import *
class ErrorDialog:
def __init__(self, sErrorMsg, sErrorHelpMsg="", sTitle="Error Message"):
self.win = DBModalDialog(50, 50, 150, 90, sTitle)
self.win.addFixedText("lblErrMsg", 5, 5, 190, 25, sErrorMsg)
self.win.addFixedText("lblErrHelpMsg", 5, 30, 190, 25, sErrorHelpMsg)
        self.win.addButton('btnOK', 55, -5, 40, 15, 'Ok',
                           actionListenerProc=self.btnOkOrCancel_clicked)
self.win.doModalDialog("",None)
def btnOkOrCancel_clicked( self, oActionEvent ):
self.win.endExecute()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
emijrp/youtube-dl
|
refs/heads/master
|
youtube_dl/postprocessor/metadatafromtitle.py
|
115
|
from __future__ import unicode_literals
import re
from .common import PostProcessor
from ..utils import PostProcessingError
class MetadataFromTitlePPError(PostProcessingError):
pass
class MetadataFromTitlePP(PostProcessor):
def __init__(self, downloader, titleformat):
super(MetadataFromTitlePP, self).__init__(downloader)
self._titleformat = titleformat
self._titleregex = self.format_to_regex(titleformat)
def format_to_regex(self, fmt):
"""
Converts a string like
'%(title)s - %(artist)s'
to a regex like
'(?P<title>.+)\ \-\ (?P<artist>.+)'
"""
lastpos = 0
regex = ""
# replace %(..)s with regex group and escape other string parts
for match in re.finditer(r'%\((\w+)\)s', fmt):
regex += re.escape(fmt[lastpos:match.start()])
regex += r'(?P<' + match.group(1) + '>.+)'
lastpos = match.end()
if lastpos < len(fmt):
regex += re.escape(fmt[lastpos:len(fmt)])
return regex
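    # Illustrative example (comment only, not in the original source):
    #   format_to_regex('%(artist)s - %(title)s')
    #   returns the pattern '(?P<artist>.+)\ \-\ (?P<title>.+)',
    #   i.e. every %(name)s placeholder becomes a named group and the literal
    #   text in between is re.escape()d.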
def run(self, info):
title = info['title']
match = re.match(self._titleregex, title)
if match is None:
raise MetadataFromTitlePPError('Could not interpret title of video as "%s"' % self._titleformat)
        for attribute, value in match.groupdict().items():
info[attribute] = value
self._downloader.to_screen('[fromtitle] parsed ' + attribute + ': ' + value)
return [], info
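# Illustrative example (comment only, not part of youtube-dl): with the format
# '%(artist)s - %(title)s', a video titled 'Some Artist - Some Song' ends up with
# info['artist'] = 'Some Artist' and info['title'] = 'Some Song' after run().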
|
evanccnyc/ansible
|
refs/heads/devel
|
test/units/template/__init__.py
|
7690
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
|
avigmati/djcat
|
refs/heads/master
|
tests/catalog_module_realty/apps.py
|
1
|
from django.apps import AppConfig
class CatalogModuleRealtyConfig(AppConfig):
name = 'catalog_module_realty'
|
CamelBackNotation/CarnotKE
|
refs/heads/master
|
jyhton/lib-python/2.7/plat-mac/Carbon/File.py
|
82
|
from _File import *
|
Shadowsocksfork/ShadowsocksFork
|
refs/heads/master
|
tests/graceful_cli.py
|
977
|
#!/usr/bin/python
import socks
import time
SERVER_IP = '127.0.0.1'
SERVER_PORT = 8001
if __name__ == '__main__':
s = socks.socksocket()
s.set_proxy(socks.SOCKS5, SERVER_IP, 1081)
s.connect((SERVER_IP, SERVER_PORT))
s.send(b'test')
time.sleep(30)
s.close()
|
bohlian/erpnext
|
refs/heads/develop
|
erpnext/setup/doctype/terms_and_conditions/terms_and_conditions.py
|
32
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import json
from frappe.model.document import Document
from frappe.utils.jinja import validate_template
class TermsandConditions(Document):
def validate(self):
if self.terms:
validate_template(self.terms)
@frappe.whitelist()
def get_terms_and_conditions(template_name, doc):
if isinstance(doc, basestring):
doc = json.loads(doc)
terms_and_conditions = frappe.get_doc("Terms and Conditions", template_name)
if terms_and_conditions.terms:
return frappe.render_template(terms_and_conditions.terms, doc)
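# Usage sketch (illustrative only; assumes a Terms and Conditions record named
# "Standard Terms" exists):
#   get_terms_and_conditions("Standard Terms", {"customer_name": "ACME"})
# renders the stored Jinja template against the supplied document dict.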
|
yongshengwang/builthue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.4.5/django/contrib/gis/geos/base.py
|
86
|
from ctypes import c_void_p
from types import NoneType
from django.contrib.gis.geos.error import GEOSException
# Trying to import GDAL libraries, if available. Have to place in
# try/except since this package may be used outside GeoDjango.
try:
from django.contrib.gis import gdal
except ImportError:
# A 'dummy' gdal module.
class GDALInfo(object):
HAS_GDAL = False
GEOJSON = False
gdal = GDALInfo()
# NumPy supported?
try:
import numpy
except ImportError:
numpy = False
class GEOSBase(object):
"""
Base object for GEOS objects that has a pointer access property
that controls access to the underlying C pointer.
"""
# Initially the pointer is NULL.
_ptr = None
# Default allowed pointer type.
ptr_type = c_void_p
# Pointer access property.
def _get_ptr(self):
        # Raise an exception if the pointer isn't valid; we don't
        # want to be passing NULL pointers to routines --
        # that's very bad.
        if self._ptr:
            return self._ptr
        else:
            raise GEOSException('NULL GEOS %s pointer encountered.' % self.__class__.__name__)
def _set_ptr(self, ptr):
# Only allow the pointer to be set with pointers of the
# compatible type or None (NULL).
if isinstance(ptr, (self.ptr_type, NoneType)):
self._ptr = ptr
else:
raise TypeError('Incompatible pointer type')
# Property for controlling access to the GEOS object pointers. Using
# this raises an exception when the pointer is NULL, thus preventing
# the C library from attempting to access an invalid memory location.
ptr = property(_get_ptr, _set_ptr)
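# Usage sketch (illustrative only): subclasses are expected to set ``ptr_type``
# to the ctypes pointer type produced by their GEOS factory routine and assign
# that pointer to ``self.ptr``; any later read of ``obj.ptr`` either returns a
# valid pointer or raises GEOSException while it is still NULL.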
|
brutkin/commons
|
refs/heads/master
|
tests/python/twitter/common/metrics/test_sampling.py
|
13
|
# ==================================================================================================
# Copyright 2013 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import os
import pytest
from twitter.common.contextutil import temporary_file
from twitter.common.metrics import Label
from twitter.common.metrics.metrics import Metrics
from twitter.common.metrics.sampler import (
MetricSampler,
SamplerBase,
DiskMetricWriter,
DiskMetricReader)
from twitter.common.quantity import Amount, Time
from twitter.common.testing.clock import ThreadedClock
def test_sampler_base():
class TestSampler(SamplerBase):
def __init__(self, period, clock):
self.count = 0
SamplerBase.__init__(self, period, clock)
def iterate(self):
self.count += 1
test_clock = ThreadedClock()
sampler = TestSampler(Amount(1, Time.SECONDS), clock=test_clock)
sampler.start()
assert test_clock.converge(threads=[sampler])
test_clock.assert_waiting(sampler, 1)
test_clock.tick(0.5)
assert test_clock.converge(threads=[sampler])
assert sampler.count == 0
test_clock.tick(0.5)
assert test_clock.converge(threads=[sampler])
assert sampler.count == 1
test_clock.tick(5)
assert test_clock.converge(threads=[sampler])
assert sampler.count == 6
assert not sampler.is_stopped()
sampler.stop()
# make sure that stopping the sampler short circuits any sampling
test_clock.tick(5)
assert test_clock.converge(threads=[sampler])
assert sampler.count == 6
def test_metric_read_write():
metrics = Metrics()
with temporary_file() as fp:
os.unlink(fp.name)
writer = DiskMetricWriter(metrics, fp.name)
reader = DiskMetricReader(fp.name)
assert reader.sample() == {}
reader.iterate()
assert reader.sample() == {}
writer.iterate()
assert reader.sample() == {}
reader.iterate()
assert reader.sample() == {}
metrics.register(Label('herp', 'derp'))
writer.iterate()
assert reader.sample() == {}
reader.iterate()
assert reader.sample() == {'herp': 'derp'}
def test_metric_sample():
metrics = Metrics()
sampler = MetricSampler(metrics)
assert sampler.sample() == {}
sampler.iterate()
assert sampler.sample() == {}
metrics.register(Label('herp', 'derp'))
assert sampler.sample() == {}
sampler.iterate()
assert sampler.sample() == {'herp': 'derp'}
|
jcoady9/python-for-android
|
refs/heads/master
|
python3-alpha/extra_modules/gdata/tlslite/utils/Python_AES.py
|
48
|
"""Pure-Python AES implementation."""
from .cryptomath import *
from .AES import *
from .rijndael import rijndael
def new(key, mode, IV):
return Python_AES(key, mode, IV)
class Python_AES(AES):
def __init__(self, key, mode, IV):
AES.__init__(self, key, mode, IV, "python")
self.rijndael = rijndael(key, 16)
self.IV = IV
def encrypt(self, plaintext):
AES.encrypt(self, plaintext)
plaintextBytes = stringToBytes(plaintext)
chainBytes = stringToBytes(self.IV)
#CBC Mode: For each block...
        for x in range(len(plaintextBytes) // 16):
#XOR with the chaining block
blockBytes = plaintextBytes[x*16 : (x*16)+16]
for y in range(16):
blockBytes[y] ^= chainBytes[y]
blockString = bytesToString(blockBytes)
#Encrypt it
encryptedBytes = stringToBytes(self.rijndael.encrypt(blockString))
#Overwrite the input with the output
for y in range(16):
plaintextBytes[(x*16)+y] = encryptedBytes[y]
#Set the next chaining block
chainBytes = encryptedBytes
self.IV = bytesToString(chainBytes)
return bytesToString(plaintextBytes)
def decrypt(self, ciphertext):
AES.decrypt(self, ciphertext)
ciphertextBytes = stringToBytes(ciphertext)
chainBytes = stringToBytes(self.IV)
#CBC Mode: For each block...
        for x in range(len(ciphertextBytes) // 16):
#Decrypt it
blockBytes = ciphertextBytes[x*16 : (x*16)+16]
blockString = bytesToString(blockBytes)
decryptedBytes = stringToBytes(self.rijndael.decrypt(blockString))
#XOR with the chaining block and overwrite the input with output
for y in range(16):
decryptedBytes[y] ^= chainBytes[y]
ciphertextBytes[(x*16)+y] = decryptedBytes[y]
#Set the next chaining block
chainBytes = blockBytes
self.IV = bytesToString(chainBytes)
return bytesToString(ciphertextBytes)
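# Minimal usage sketch (illustrative only; the mode constant and the 16-byte
# key/IV lengths are assumptions based on the tlslite AES interface):
#   cipher = new(key, 2, iv)          # mode 2 == CBC in this API
#   ct = cipher.encrypt(plaintext)    # plaintext length must be a multiple of 16
#   assert new(key, 2, iv).decrypt(ct) == plaintext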
|