text stringlengths 4 1.02M | meta dict |
|---|---|
from .cloud_tts import (
AudioConfig,
AudioEncoding,
CustomVoiceParams,
ListVoicesRequest,
ListVoicesResponse,
SsmlVoiceGender,
SynthesisInput,
SynthesizeSpeechRequest,
SynthesizeSpeechResponse,
Timepoint,
Voice,
VoiceSelectionParams,
)
# Public API of this types package.  The message types are listed first in
# alphabetical order; the two enums (AudioEncoding, SsmlVoiceGender) follow
# at the end.  Keep this tuple in sync with the import list above.
__all__ = (
    "AudioConfig",
    "CustomVoiceParams",
    "ListVoicesRequest",
    "ListVoicesResponse",
    "SynthesisInput",
    "SynthesizeSpeechRequest",
    "SynthesizeSpeechResponse",
    "Timepoint",
    "Voice",
    "VoiceSelectionParams",
    "AudioEncoding",
    "SsmlVoiceGender",
)
| {
"content_hash": "dbdb23ffab64f0b58d0973cf02ee35df",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 31,
"avg_line_length": 20.06896551724138,
"alnum_prop": 0.6752577319587629,
"repo_name": "googleapis/python-texttospeech",
"id": "2a34ccab0090fbcd18a4fda4b8321c43b5f5f997",
"size": "1182",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "google/cloud/texttospeech_v1beta1/types/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "368339"
},
{
"name": "Shell",
"bytes": "30678"
}
],
"symlink_target": ""
} |
"""this module contains utilities for rebuilding a _ast tree in
order to get a single Astroid representation
"""
import sys
import _ast
import astroid
from astroid import astpeephole
from astroid import nodes
# Symbol for each _ast binary-operator class; used when rebuilding BinOp and
# AugAssign nodes.
_BIN_OP_CLASSES = {_ast.Add: '+',
                   _ast.BitAnd: '&',
                   _ast.BitOr: '|',
                   _ast.BitXor: '^',
                   _ast.Div: '/',
                   _ast.FloorDiv: '//',
                   _ast.Mod: '%',
                   _ast.Mult: '*',
                   _ast.Pow: '**',
                   _ast.Sub: '-',
                   _ast.LShift: '<<',
                   _ast.RShift: '>>',
                   }
if sys.version_info >= (3, 5):
    # The matrix-multiplication operator (PEP 465) only exists on 3.5+.
    _BIN_OP_CLASSES[_ast.MatMult] = '@'
# Symbol for each _ast boolean-operator class (BoolOp rebuilding).
_BOOL_OP_CLASSES = {_ast.And: 'and',
                    _ast.Or: 'or',
                    }
# Symbol for each _ast unary-operator class (UnaryOp rebuilding).
_UNARY_OP_CLASSES = {_ast.UAdd: '+',
                     _ast.USub: '-',
                     _ast.Not: 'not',
                     _ast.Invert: '~',
                     }
# Symbol for each _ast comparison-operator class (Compare rebuilding).
_CMP_OP_CLASSES = {_ast.Eq: '==',
                   _ast.Gt: '>',
                   _ast.GtE: '>=',
                   _ast.In: 'in',
                   _ast.Is: 'is',
                   _ast.IsNot: 'is not',
                   _ast.Lt: '<',
                   _ast.LtE: '<=',
                   _ast.NotEq: '!=',
                   _ast.NotIn: 'not in',
                   }
# Python 2 parses True/False/None as plain Name nodes; visit_name uses this
# table to turn them into Const nodes instead.
CONST_NAME_TRANSFORMS = {'None': None,
                         'True': True,
                         'False': False,
                         }
# _ast class names whose visit_* method does not simply match their lowercased
# name; TreeRebuilder.visit consults this before building the method name.
REDIRECT = {'arguments': 'Arguments',
            'comprehension': 'Comprehension',
            "ListCompFor": 'Comprehension',
            "GenExprFor": 'Comprehension',
            'excepthandler': 'ExceptHandler',
            'keyword': 'Keyword',
            }
PY3 = sys.version_info >= (3, 0)
PY34 = sys.version_info >= (3, 4)
# Expression-context _ast classes mapped to astroid context markers; _ast.Param
# is deliberately treated as a Store (function parameters bind names).
CONTEXTS = {_ast.Load: astroid.Load,
            _ast.Store: astroid.Store,
            _ast.Del: astroid.Del,
            _ast.Param: astroid.Store}
def _get_doc(node):
try:
if isinstance(node.body[0], _ast.Expr) and isinstance(node.body[0].value, _ast.Str):
doc = node.body[0].value.s
node.body = node.body[1:]
return node, doc
except IndexError:
pass # ast built from scratch
return node, None
def _visit_or_none(node, attr, visitor, parent, visit='visit',
**kws):
"""If the given node has an attribute, visits the attribute, and
otherwise returns None.
"""
value = getattr(node, attr, None)
if value:
return getattr(visitor, visit)(value, parent, **kws)
return None
def _get_context(node):
    """Map the node's ``ctx`` field to an astroid context, defaulting to Load."""
    return CONTEXTS.get(node.ctx.__class__, astroid.Load)
class TreeRebuilder(object):
    """Rebuilds the _ast tree to become an Astroid tree"""
    def __init__(self, manager):
        self._manager = manager
        # stack of {name: [Global nodes]} dicts, one per enclosing function;
        # consulted by _save_assignment to bind globals at module level
        self._global_names = []
        # ImportFrom nodes collected so names can be bound after building
        self._import_from_nodes = []
        # AssignAttr nodes whose owning frame is resolved in a delayed pass
        self._delayed_assattr = []
        # cache mapping _ast node classes to their bound visit_* method
        self._visit_meths = {}
        self._peepholer = astpeephole.ASTPeepholeOptimizer()
    def visit_module(self, node, modname, modpath, package):
        """visit a Module node by returning a fresh instance of it"""
        node, doc = _get_doc(node)
        newnode = nodes.Module(name=modname, doc=doc, file=modpath, path=modpath,
                               package=package, parent=None)
        newnode.postinit([self.visit(child, newnode) for child in node.body])
        return newnode
    def visit(self, node, parent):
        # Dispatch to visit_<classname> (via REDIRECT for irregular names),
        # memoizing the method lookup per _ast node class.
        cls = node.__class__
        if cls in self._visit_meths:
            visit_method = self._visit_meths[cls]
        else:
            cls_name = cls.__name__
            visit_name = 'visit_' + REDIRECT.get(cls_name, cls_name).lower()
            visit_method = getattr(self, visit_name)
            self._visit_meths[cls] = visit_method
        return visit_method(node, parent)
    def _save_assignment(self, node, name=None):
        """save assignement situation since node.parent is not available yet"""
        if self._global_names and node.name in self._global_names[-1]:
            node.root().set_local(node.name, node)
        else:
            node.parent.set_local(node.name, node)
    def visit_arguments(self, node, parent):
        """visit a Arguments node by returning a fresh instance of it"""
        vararg, kwarg = node.vararg, node.kwarg
        if PY34:
            newnode = nodes.Arguments(vararg.arg if vararg else None,
                                      kwarg.arg if kwarg else None,
                                      parent)
        else:
            newnode = nodes.Arguments(vararg, kwarg, parent)
        args = [self.visit(child, newnode) for child in node.args]
        defaults = [self.visit(child, newnode)
                    for child in node.defaults]
        varargannotation = None
        kwargannotation = None
        # change added in 82732 (7c5c678e4164), vararg and kwarg
        # are instances of `_ast.arg`, not strings
        if vararg:
            if PY34:
                if node.vararg.annotation:
                    varargannotation = self.visit(node.vararg.annotation,
                                                  newnode)
                vararg = vararg.arg
            elif PY3 and node.varargannotation:
                varargannotation = self.visit(node.varargannotation,
                                              newnode)
        if kwarg:
            if PY34:
                if node.kwarg.annotation:
                    kwargannotation = self.visit(node.kwarg.annotation,
                                                 newnode)
                kwarg = kwarg.arg
            elif PY3:
                if node.kwargannotation:
                    kwargannotation = self.visit(node.kwargannotation,
                                                 newnode)
        if PY3:
            kwonlyargs = [self.visit(child, newnode) for child
                          in node.kwonlyargs]
            kw_defaults = [self.visit(child, newnode) if child else
                           None for child in node.kw_defaults]
            annotations = [self.visit(arg.annotation, newnode) if
                           arg.annotation else None for arg in node.args]
            kwonlyargs_annotations = [
                self.visit(arg.annotation, newnode) if arg.annotation else None
                for arg in node.kwonlyargs
            ]
        else:
            # Python 2 has no keyword-only arguments or annotations.
            kwonlyargs = []
            kw_defaults = []
            annotations = []
            kwonlyargs_annotations = []
        newnode.postinit(
            args=args,
            defaults=defaults,
            kwonlyargs=kwonlyargs,
            kw_defaults=kw_defaults,
            annotations=annotations,
            kwonlyargs_annotations=kwonlyargs_annotations,
            varargannotation=varargannotation,
            kwargannotation=kwargannotation
        )
        # save argument names in locals:
        if vararg:
            newnode.parent.set_local(vararg, newnode)
        if kwarg:
            newnode.parent.set_local(kwarg, newnode)
        return newnode
    def visit_assert(self, node, parent):
        """visit a Assert node by returning a fresh instance of it"""
        newnode = nodes.Assert(node.lineno, node.col_offset, parent)
        if node.msg:
            msg = self.visit(node.msg, newnode)
        else:
            msg = None
        newnode.postinit(self.visit(node.test, newnode), msg)
        return newnode
    def visit_assign(self, node, parent):
        """visit a Assign node by returning a fresh instance of it"""
        newnode = nodes.Assign(node.lineno, node.col_offset, parent)
        newnode.postinit([self.visit(child, newnode)
                          for child in node.targets],
                         self.visit(node.value, newnode))
        return newnode
    def visit_assignname(self, node, parent, node_name=None):
        '''visit a node and return a AssignName node'''
        newnode = nodes.AssignName(node_name, getattr(node, 'lineno', None),
                                   getattr(node, 'col_offset', None), parent)
        self._save_assignment(newnode)
        return newnode
    def visit_augassign(self, node, parent):
        """visit a AugAssign node by returning a fresh instance of it"""
        newnode = nodes.AugAssign(_BIN_OP_CLASSES[type(node.op)] + "=",
                                  node.lineno, node.col_offset, parent)
        newnode.postinit(self.visit(node.target, newnode),
                         self.visit(node.value, newnode))
        return newnode
    def visit_repr(self, node, parent):
        """visit a Backquote node by returning a fresh instance of it"""
        newnode = nodes.Repr(node.lineno, node.col_offset, parent)
        newnode.postinit(self.visit(node.value, newnode))
        return newnode
    def visit_binop(self, node, parent):
        """visit a BinOp node by returning a fresh instance of it"""
        if isinstance(node.left, _ast.BinOp) and self._manager.optimize_ast:
            # Optimize BinOp operations in order to remove
            # redundant recursion. For instance, if the
            # following code is parsed in order to obtain
            # its ast, then the rebuilder will fail with an
            # infinite recursion, the same will happen with the
            # inference engine as well. There's no need to hold
            # so many objects for the BinOp if they can be reduced
            # to something else (also, the optimization
            # might handle only Const binops, which isn't a big
            # problem for the correctness of the program).
            #
            # ("a" + "b" + # one thousand more + "c")
            optimized = self._peepholer.optimize_binop(node, parent)
            if optimized:
                return optimized
        newnode = nodes.BinOp(_BIN_OP_CLASSES[type(node.op)],
                              node.lineno, node.col_offset, parent)
        newnode.postinit(self.visit(node.left, newnode),
                         self.visit(node.right, newnode))
        return newnode
    def visit_boolop(self, node, parent):
        """visit a BoolOp node by returning a fresh instance of it"""
        newnode = nodes.BoolOp(_BOOL_OP_CLASSES[type(node.op)],
                               node.lineno, node.col_offset, parent)
        newnode.postinit([self.visit(child, newnode)
                          for child in node.values])
        return newnode
    def visit_break(self, node, parent):
        """visit a Break node by returning a fresh instance of it"""
        return nodes.Break(getattr(node, 'lineno', None),
                           getattr(node, 'col_offset', None),
                           parent)
    def visit_call(self, node, parent):
        """visit a CallFunc node by returning a fresh instance of it"""
        newnode = nodes.Call(node.lineno, node.col_offset, parent)
        # starargs/kwargs only exist on pre-3.5 asts; on 3.5+ they are
        # represented as Starred arguments / None-keyed keywords instead.
        starargs = _visit_or_none(node, 'starargs', self, newnode)
        kwargs = _visit_or_none(node, 'kwargs', self, newnode)
        args = [self.visit(child, newnode)
                for child in node.args]
        if node.keywords:
            keywords = [self.visit(child, newnode)
                        for child in node.keywords]
        else:
            keywords = None
        if starargs:
            new_starargs = nodes.Starred(col_offset=starargs.col_offset,
                                         lineno=starargs.lineno,
                                         parent=starargs.parent)
            new_starargs.postinit(value=starargs)
            args.append(new_starargs)
        if kwargs:
            new_kwargs = nodes.Keyword(arg=None, col_offset=kwargs.col_offset,
                                       lineno=kwargs.lineno,
                                       parent=kwargs.parent)
            new_kwargs.postinit(value=kwargs)
            if keywords:
                keywords.append(new_kwargs)
            else:
                keywords = [new_kwargs]
        newnode.postinit(self.visit(node.func, newnode),
                         args, keywords)
        return newnode
    def visit_classdef(self, node, parent, newstyle=None):
        """visit a ClassDef node to become astroid"""
        node, doc = _get_doc(node)
        newnode = nodes.ClassDef(node.name, doc, node.lineno,
                                 node.col_offset, parent)
        metaclass = None
        if PY3:
            # the metaclass keyword is pulled out of the keyword list and
            # stored separately on the ClassDef
            for keyword in node.keywords:
                if keyword.arg == 'metaclass':
                    metaclass = self.visit(keyword, newnode).value
                    break
        if node.decorator_list:
            decorators = self.visit_decorators(node, newnode)
        else:
            decorators = None
        newnode.postinit([self.visit(child, newnode)
                          for child in node.bases],
                         [self.visit(child, newnode)
                          for child in node.body],
                         decorators, newstyle, metaclass,
                         [self.visit(kwd, newnode) for kwd in node.keywords
                          if kwd.arg != 'metaclass'] if PY3 else [])
        return newnode
    def visit_const(self, node, parent):
        """visit a Const node by returning a fresh instance of it"""
        return nodes.Const(node.value,
                           getattr(node, 'lineno', None),
                           getattr(node, 'col_offset', None), parent)
    def visit_continue(self, node, parent):
        """visit a Continue node by returning a fresh instance of it"""
        return nodes.Continue(getattr(node, 'lineno', None),
                              getattr(node, 'col_offset', None),
                              parent)
    def visit_compare(self, node, parent):
        """visit a Compare node by returning a fresh instance of it"""
        newnode = nodes.Compare(node.lineno, node.col_offset, parent)
        newnode.postinit(self.visit(node.left, newnode),
                         [(_CMP_OP_CLASSES[op.__class__],
                           self.visit(expr, newnode))
                          for (op, expr) in zip(node.ops, node.comparators)])
        return newnode
    def visit_comprehension(self, node, parent):
        """visit a Comprehension node by returning a fresh instance of it"""
        newnode = nodes.Comprehension(parent)
        newnode.postinit(self.visit(node.target, newnode),
                         self.visit(node.iter, newnode),
                         [self.visit(child, newnode)
                          for child in node.ifs],
                         getattr(node, 'is_async', None))
        return newnode
    def visit_decorators(self, node, parent):
        """visit a Decorators node by returning a fresh instance of it"""
        # /!\ node is actually a _ast.FunctionDef node while
        # parent is a astroid.nodes.FunctionDef node
        newnode = nodes.Decorators(node.lineno, node.col_offset, parent)
        newnode.postinit([self.visit(child, newnode)
                          for child in node.decorator_list])
        return newnode
    def visit_delete(self, node, parent):
        """visit a Delete node by returning a fresh instance of it"""
        newnode = nodes.Delete(node.lineno, node.col_offset, parent)
        newnode.postinit([self.visit(child, newnode)
                          for child in node.targets])
        return newnode
    def _visit_dict_items(self, node, parent, newnode):
        # Yield (key, value) pairs for a Dict node; a None key marks a
        # **-unpacked mapping entry.
        for key, value in zip(node.keys, node.values):
            rebuilt_value = self.visit(value, newnode)
            if not key:
                # Python 3.5 and extended unpacking
                rebuilt_key = nodes.DictUnpack(rebuilt_value.lineno,
                                               rebuilt_value.col_offset,
                                               parent)
            else:
                rebuilt_key = self.visit(key, newnode)
            yield rebuilt_key, rebuilt_value
    def visit_dict(self, node, parent):
        """visit a Dict node by returning a fresh instance of it"""
        newnode = nodes.Dict(node.lineno, node.col_offset, parent)
        items = list(self._visit_dict_items(node, parent, newnode))
        newnode.postinit(items)
        return newnode
    def visit_dictcomp(self, node, parent):
        """visit a DictComp node by returning a fresh instance of it"""
        newnode = nodes.DictComp(node.lineno, node.col_offset, parent)
        newnode.postinit(self.visit(node.key, newnode),
                         self.visit(node.value, newnode),
                         [self.visit(child, newnode)
                          for child in node.generators])
        return newnode
    def visit_expr(self, node, parent):
        """visit a Expr node by returning a fresh instance of it"""
        newnode = nodes.Expr(node.lineno, node.col_offset, parent)
        newnode.postinit(self.visit(node.value, newnode))
        return newnode
    def visit_ellipsis(self, node, parent):
        """visit an Ellipsis node by returning a fresh instance of it"""
        return nodes.Ellipsis(getattr(node, 'lineno', None),
                              getattr(node, 'col_offset', None), parent)
    def visit_emptynode(self, node, parent):
        """visit an EmptyNode node by returning a fresh instance of it"""
        return nodes.EmptyNode(getattr(node, 'lineno', None),
                               getattr(node, 'col_offset', None), parent)
    def visit_excepthandler(self, node, parent):
        """visit an ExceptHandler node by returning a fresh instance of it"""
        newnode = nodes.ExceptHandler(node.lineno, node.col_offset, parent)
        # /!\ node.name can be a tuple
        newnode.postinit(_visit_or_none(node, 'type', self, newnode),
                         _visit_or_none(node, 'name', self, newnode),
                         [self.visit(child, newnode)
                          for child in node.body])
        return newnode
    def visit_exec(self, node, parent):
        """visit an Exec node by returning a fresh instance of it"""
        newnode = nodes.Exec(node.lineno, node.col_offset, parent)
        newnode.postinit(self.visit(node.body, newnode),
                         _visit_or_none(node, 'globals', self, newnode),
                         _visit_or_none(node, 'locals', self, newnode))
        return newnode
    def visit_extslice(self, node, parent):
        """visit an ExtSlice node by returning a fresh instance of it"""
        newnode = nodes.ExtSlice(parent=parent)
        newnode.postinit([self.visit(dim, newnode)
                          for dim in node.dims])
        return newnode
    def _visit_for(self, cls, node, parent):
        """visit a For node by returning a fresh instance of it"""
        # shared by visit_for and (Py3.5+) visit_asyncfor via the cls argument
        newnode = cls(node.lineno, node.col_offset, parent)
        newnode.postinit(self.visit(node.target, newnode),
                         self.visit(node.iter, newnode),
                         [self.visit(child, newnode)
                          for child in node.body],
                         [self.visit(child, newnode)
                          for child in node.orelse])
        return newnode
    def visit_for(self, node, parent):
        return self._visit_for(nodes.For, node, parent)
    def visit_importfrom(self, node, parent):
        """visit an ImportFrom node by returning a fresh instance of it"""
        names = [(alias.name, alias.asname) for alias in node.names]
        newnode = nodes.ImportFrom(node.module or '', names, node.level or None,
                                   getattr(node, 'lineno', None),
                                   getattr(node, 'col_offset', None), parent)
        # store From names to add them to locals after building
        self._import_from_nodes.append(newnode)
        return newnode
    def _visit_functiondef(self, cls, node, parent):
        """visit an FunctionDef node to become astroid"""
        # push a fresh global-name scope for the duration of this function
        self._global_names.append({})
        node, doc = _get_doc(node)
        newnode = cls(node.name, doc, node.lineno,
                      node.col_offset, parent)
        if node.decorator_list:
            decorators = self.visit_decorators(node, newnode)
        else:
            decorators = None
        if PY3 and node.returns:
            returns = self.visit(node.returns, newnode)
        else:
            returns = None
        newnode.postinit(self.visit(node.args, newnode),
                         [self.visit(child, newnode)
                          for child in node.body],
                         decorators, returns)
        self._global_names.pop()
        return newnode
    def visit_functiondef(self, node, parent):
        return self._visit_functiondef(nodes.FunctionDef, node, parent)
    def visit_generatorexp(self, node, parent):
        """visit a GeneratorExp node by returning a fresh instance of it"""
        newnode = nodes.GeneratorExp(node.lineno, node.col_offset, parent)
        newnode.postinit(self.visit(node.elt, newnode),
                         [self.visit(child, newnode)
                          for child in node.generators])
        return newnode
    def visit_attribute(self, node, parent):
        """visit an Attribute node by returning a fresh instance of it"""
        context = _get_context(node)
        if context == astroid.Del:
            # FIXME : maybe we should reintroduce and visit_delattr ?
            # for instance, deactivating assign_ctx
            newnode = nodes.DelAttr(node.attr, node.lineno, node.col_offset,
                                    parent)
        elif context == astroid.Store:
            newnode = nodes.AssignAttr(node.attr, node.lineno, node.col_offset,
                                       parent)
            # Prohibit a local save if we are in an ExceptHandler.
            if not isinstance(parent, astroid.ExceptHandler):
                self._delayed_assattr.append(newnode)
        else:
            newnode = nodes.Attribute(node.attr, node.lineno, node.col_offset,
                                      parent)
        newnode.postinit(self.visit(node.value, newnode))
        return newnode
    def visit_global(self, node, parent):
        """visit a Global node to become astroid"""
        newnode = nodes.Global(node.names, getattr(node, 'lineno', None),
                               getattr(node, 'col_offset', None), parent)
        if self._global_names:  # global at the module level, no effect
            for name in node.names:
                self._global_names[-1].setdefault(name, []).append(newnode)
        return newnode
    def visit_if(self, node, parent):
        """visit an If node by returning a fresh instance of it"""
        newnode = nodes.If(node.lineno, node.col_offset, parent)
        newnode.postinit(self.visit(node.test, newnode),
                         [self.visit(child, newnode)
                          for child in node.body],
                         [self.visit(child, newnode)
                          for child in node.orelse])
        return newnode
    def visit_ifexp(self, node, parent):
        """visit a IfExp node by returning a fresh instance of it"""
        newnode = nodes.IfExp(node.lineno, node.col_offset, parent)
        newnode.postinit(self.visit(node.test, newnode),
                         self.visit(node.body, newnode),
                         self.visit(node.orelse, newnode))
        return newnode
    def visit_import(self, node, parent):
        """visit a Import node by returning a fresh instance of it"""
        names = [(alias.name, alias.asname) for alias in node.names]
        newnode = nodes.Import(names, getattr(node, 'lineno', None),
                               getattr(node, 'col_offset', None), parent)
        # save import names in parent's locals:
        for (name, asname) in newnode.names:
            name = asname or name
            parent.set_local(name.split('.')[0], newnode)
        return newnode
    def visit_index(self, node, parent):
        """visit a Index node by returning a fresh instance of it"""
        newnode = nodes.Index(parent=parent)
        newnode.postinit(self.visit(node.value, newnode))
        return newnode
    def visit_keyword(self, node, parent):
        """visit a Keyword node by returning a fresh instance of it"""
        newnode = nodes.Keyword(node.arg, parent=parent)
        newnode.postinit(self.visit(node.value, newnode))
        return newnode
    def visit_lambda(self, node, parent):
        """visit a Lambda node by returning a fresh instance of it"""
        newnode = nodes.Lambda(node.lineno, node.col_offset, parent)
        newnode.postinit(self.visit(node.args, newnode),
                         self.visit(node.body, newnode))
        return newnode
    def visit_list(self, node, parent):
        """visit a List node by returning a fresh instance of it"""
        context = _get_context(node)
        newnode = nodes.List(ctx=context,
                             lineno=node.lineno,
                             col_offset=node.col_offset,
                             parent=parent)
        newnode.postinit([self.visit(child, newnode)
                          for child in node.elts])
        return newnode
    def visit_listcomp(self, node, parent):
        """visit a ListComp node by returning a fresh instance of it"""
        newnode = nodes.ListComp(node.lineno, node.col_offset, parent)
        newnode.postinit(self.visit(node.elt, newnode),
                         [self.visit(child, newnode)
                          for child in node.generators])
        return newnode
    def visit_name(self, node, parent):
        """visit a Name node by returning a fresh instance of it"""
        context = _get_context(node)
        # True and False can be assigned to something in py2x, so we have to
        # check first the context.
        if context == astroid.Del:
            newnode = nodes.DelName(node.id, node.lineno, node.col_offset,
                                    parent)
        elif context == astroid.Store:
            newnode = nodes.AssignName(node.id, node.lineno, node.col_offset,
                                       parent)
        elif node.id in CONST_NAME_TRANSFORMS:
            newnode = nodes.Const(CONST_NAME_TRANSFORMS[node.id],
                                  getattr(node, 'lineno', None),
                                  getattr(node, 'col_offset', None), parent)
            # a Const is not an assignment target, so return before the
            # _save_assignment bookkeeping below
            return newnode
        else:
            newnode = nodes.Name(node.id, node.lineno, node.col_offset, parent)
        # XXX REMOVE me :
        if context in (astroid.Del, astroid.Store):  # 'Aug' ??
            self._save_assignment(newnode)
        return newnode
    def visit_str(self, node, parent):
        """visit a String/Bytes node by returning a fresh instance of Const"""
        return nodes.Const(node.s, getattr(node, 'lineno', None),
                           getattr(node, 'col_offset', None), parent)
    visit_bytes = visit_str
    def visit_num(self, node, parent):
        """visit a Num node by returning a fresh instance of Const"""
        return nodes.Const(node.n, getattr(node, 'lineno', None),
                           getattr(node, 'col_offset', None), parent)
    def visit_pass(self, node, parent):
        """visit a Pass node by returning a fresh instance of it"""
        return nodes.Pass(node.lineno, node.col_offset, parent)
    def visit_print(self, node, parent):
        """visit a Print node by returning a fresh instance of it"""
        newnode = nodes.Print(node.nl, node.lineno, node.col_offset, parent)
        newnode.postinit(_visit_or_none(node, 'dest', self, newnode),
                         [self.visit(child, newnode)
                          for child in node.values])
        return newnode
    def visit_raise(self, node, parent):
        """visit a Raise node by returning a fresh instance of it"""
        # Python 2 three-expression form: raise type, inst, tback
        newnode = nodes.Raise(node.lineno, node.col_offset, parent)
        newnode.postinit(_visit_or_none(node, 'type', self, newnode),
                         _visit_or_none(node, 'inst', self, newnode),
                         _visit_or_none(node, 'tback', self, newnode))
        return newnode
    def visit_return(self, node, parent):
        """visit a Return node by returning a fresh instance of it"""
        newnode = nodes.Return(node.lineno, node.col_offset, parent)
        if node.value is not None:
            newnode.postinit(self.visit(node.value, newnode))
        return newnode
    def visit_set(self, node, parent):
        """visit a Set node by returning a fresh instance of it"""
        newnode = nodes.Set(node.lineno, node.col_offset, parent)
        newnode.postinit([self.visit(child, newnode)
                          for child in node.elts])
        return newnode
    def visit_setcomp(self, node, parent):
        """visit a SetComp node by returning a fresh instance of it"""
        newnode = nodes.SetComp(node.lineno, node.col_offset, parent)
        newnode.postinit(self.visit(node.elt, newnode),
                         [self.visit(child, newnode)
                          for child in node.generators])
        return newnode
    def visit_slice(self, node, parent):
        """visit a Slice node by returning a fresh instance of it"""
        newnode = nodes.Slice(parent=parent)
        newnode.postinit(_visit_or_none(node, 'lower', self, newnode),
                         _visit_or_none(node, 'upper', self, newnode),
                         _visit_or_none(node, 'step', self, newnode))
        return newnode
    def visit_subscript(self, node, parent):
        """visit a Subscript node by returning a fresh instance of it"""
        context = _get_context(node)
        newnode = nodes.Subscript(ctx=context,
                                  lineno=node.lineno,
                                  col_offset=node.col_offset,
                                  parent=parent)
        newnode.postinit(self.visit(node.value, newnode),
                         self.visit(node.slice, newnode))
        return newnode
    def visit_tryexcept(self, node, parent):
        """visit a TryExcept node by returning a fresh instance of it"""
        newnode = nodes.TryExcept(node.lineno, node.col_offset, parent)
        newnode.postinit([self.visit(child, newnode)
                          for child in node.body],
                         [self.visit(child, newnode)
                          for child in node.handlers],
                         [self.visit(child, newnode)
                          for child in node.orelse])
        return newnode
    def visit_tryfinally(self, node, parent):
        """visit a TryFinally node by returning a fresh instance of it"""
        newnode = nodes.TryFinally(node.lineno, node.col_offset, parent)
        newnode.postinit([self.visit(child, newnode)
                          for child in node.body],
                         [self.visit(n, newnode)
                          for n in node.finalbody])
        return newnode
    def visit_tuple(self, node, parent):
        """visit a Tuple node by returning a fresh instance of it"""
        context = _get_context(node)
        newnode = nodes.Tuple(ctx=context,
                              lineno=node.lineno,
                              col_offset=node.col_offset,
                              parent=parent)
        newnode.postinit([self.visit(child, newnode)
                          for child in node.elts])
        return newnode
    def visit_unaryop(self, node, parent):
        """visit a UnaryOp node by returning a fresh instance of it"""
        newnode = nodes.UnaryOp(_UNARY_OP_CLASSES[node.op.__class__],
                                node.lineno, node.col_offset, parent)
        newnode.postinit(self.visit(node.operand, newnode))
        return newnode
    def visit_while(self, node, parent):
        """visit a While node by returning a fresh instance of it"""
        newnode = nodes.While(node.lineno, node.col_offset, parent)
        newnode.postinit(self.visit(node.test, newnode),
                         [self.visit(child, newnode)
                          for child in node.body],
                         [self.visit(child, newnode)
                          for child in node.orelse])
        return newnode
    def visit_with(self, node, parent):
        # pre-3.3 With node: a single (context_expr, optional_vars) pair
        newnode = nodes.With(node.lineno, node.col_offset, parent)
        expr = self.visit(node.context_expr, newnode)
        if node.optional_vars is not None:
            optional_vars = self.visit(node.optional_vars, newnode)
        else:
            optional_vars = None
        newnode.postinit([(expr, optional_vars)],
                         [self.visit(child, newnode)
                          for child in node.body])
        return newnode
    def visit_yield(self, node, parent):
        """visit a Yield node by returning a fresh instance of it"""
        newnode = nodes.Yield(node.lineno, node.col_offset, parent)
        if node.value is not None:
            newnode.postinit(self.visit(node.value, newnode))
        return newnode
class TreeRebuilder3(TreeRebuilder):
    """extend and overwrite TreeRebuilder for python3k"""
    def visit_arg(self, node, parent):
        """visit a arg node by returning a fresh AssName instance"""
        # TODO(cpopa): introduce an Arg node instead of using AssignName.
        return self.visit_assignname(node, parent, node.arg)
    def visit_nameconstant(self, node, parent):
        # in Python 3.4 we have NameConstant for True / False / None
        return nodes.Const(node.value, getattr(node, 'lineno', None),
                           getattr(node, 'col_offset', None), parent)
    def visit_excepthandler(self, node, parent):
        """visit an ExceptHandler node by returning a fresh instance of it"""
        # On Python 3 the handler name is a plain string, not a Name node.
        newnode = nodes.ExceptHandler(node.lineno, node.col_offset, parent)
        if node.name:
            name = self.visit_assignname(node, newnode, node.name)
        else:
            name = None
        newnode.postinit(_visit_or_none(node, 'type', self, newnode),
                         name,
                         [self.visit(child, newnode)
                          for child in node.body])
        return newnode
    def visit_nonlocal(self, node, parent):
        """visit a Nonlocal node and return a new instance of it"""
        return nodes.Nonlocal(node.names, getattr(node, 'lineno', None),
                              getattr(node, 'col_offset', None), parent)
    def visit_raise(self, node, parent):
        """visit a Raise node by returning a fresh instance of it"""
        newnode = nodes.Raise(node.lineno, node.col_offset, parent)
        # no traceback; anyway it is not used in Pylint
        newnode.postinit(_visit_or_none(node, 'exc', self, newnode),
                         _visit_or_none(node, 'cause', self, newnode))
        return newnode
    def visit_starred(self, node, parent):
        """visit a Starred node and return a new instance of it"""
        context = _get_context(node)
        newnode = nodes.Starred(ctx=context, lineno=node.lineno,
                                col_offset=node.col_offset,
                                parent=parent)
        newnode.postinit(self.visit(node.value, newnode))
        return newnode
    def visit_try(self, node, parent):
        # python 3.3 introduce a new Try node replacing
        # TryFinally/TryExcept nodes
        if node.finalbody:
            newnode = nodes.TryFinally(node.lineno, node.col_offset, parent)
            if node.handlers:
                # try/except/finally: nest the except part inside the finally
                body = [self.visit_tryexcept(node, newnode)]
            else:
                body = [self.visit(child, newnode)
                        for child in node.body]
            newnode.postinit(body,
                             [self.visit(n, newnode)
                              for n in node.finalbody])
            return newnode
        elif node.handlers:
            return self.visit_tryexcept(node, parent)
    def visit_annassign(self, node, parent):
        """visit an AnnAssign node by returning a fresh instance of it"""
        newnode = nodes.AnnAssign(node.lineno, node.col_offset, parent)
        annotation = _visit_or_none(node, 'annotation', self, newnode)
        newnode.postinit(target=self.visit(node.target, newnode),
                         annotation=annotation,
                         simple=node.simple,
                         value=_visit_or_none(node, 'value', self, newnode))
        return newnode
    def _visit_with(self, cls, node, parent):
        if 'items' not in node._fields:
            # python < 3.3
            return super(TreeRebuilder3, self).visit_with(node, parent)
        newnode = cls(node.lineno, node.col_offset, parent)
        def visit_child(child):
            # each withitem is rebuilt to an (expr, optional_vars) pair
            expr = self.visit(child.context_expr, newnode)
            var = _visit_or_none(child, 'optional_vars', self, newnode)
            return expr, var
        newnode.postinit([visit_child(child) for child in node.items],
                         [self.visit(child, newnode)
                          for child in node.body])
        return newnode
    def visit_with(self, node, parent):
        return self._visit_with(nodes.With, node, parent)
    def visit_yieldfrom(self, node, parent):
        newnode = nodes.YieldFrom(node.lineno, node.col_offset, parent)
        if node.value is not None:
            newnode.postinit(self.visit(node.value, newnode))
        return newnode
    def visit_classdef(self, node, parent, newstyle=True):
        # every Python 3 class is new-style
        return super(TreeRebuilder3, self).visit_classdef(node, parent,
                                                          newstyle=newstyle)
    # Async structs added in Python 3.5
    def visit_asyncfunctiondef(self, node, parent):
        return self._visit_functiondef(nodes.AsyncFunctionDef, node, parent)
    def visit_asyncfor(self, node, parent):
        return self._visit_for(nodes.AsyncFor, node, parent)
    def visit_await(self, node, parent):
        newnode = nodes.Await(node.lineno, node.col_offset, parent)
        newnode.postinit(value=self.visit(node.value, newnode))
        return newnode
    def visit_asyncwith(self, node, parent):
        return self._visit_with(nodes.AsyncWith, node, parent)
    def visit_joinedstr(self, node, parent):
        # f-string (Python 3.6+)
        newnode = nodes.JoinedStr(node.lineno, node.col_offset, parent)
        newnode.postinit([self.visit(child, newnode)
                          for child in node.values])
        return newnode
    def visit_formattedvalue(self, node, parent):
        # a single {...} replacement field inside an f-string
        newnode = nodes.FormattedValue(node.lineno, node.col_offset, parent)
        newnode.postinit(self.visit(node.value, newnode),
                         node.conversion,
                         _visit_or_none(node, 'format_spec', self, newnode))
        return newnode
# On Python 3 the Py3-aware subclass wholesale replaces the base rebuilder,
# so importers always get the right implementation under the one name.
if sys.version_info >= (3, 0):
    TreeRebuilder = TreeRebuilder3
| {
"content_hash": "6a2c17286a6ba0afcfb7080c7d7967eb",
"timestamp": "",
"source": "github",
"line_count": 906,
"max_line_length": 92,
"avg_line_length": 43.13245033112583,
"alnum_prop": 0.5558882235528942,
"repo_name": "ClovisIRex/Snake-django",
"id": "60a1ad777d74eac972d8c5480a9210b9449ca934",
"size": "39479",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "env/lib/python3.6/site-packages/astroid/rebuilder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6771"
},
{
"name": "HTML",
"bytes": "3435"
},
{
"name": "JavaScript",
"bytes": "2172"
},
{
"name": "Python",
"bytes": "8285"
},
{
"name": "Shell",
"bytes": "60"
}
],
"symlink_target": ""
} |
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PreferencesV20(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'locale': 'str'
    }
    attribute_map = {
        'locale': 'locale'
    }
    def __init__(self, locale=None):  # noqa: E501
        """PreferencesV20 - a model defined in Swagger"""  # noqa: E501
        self._locale = None
        self.discriminator = None
        # NOTE(review): the setter rejects values outside allowed_values
        # (including None), so only assign when a locale was supplied.
        if locale is not None:
            self.locale = locale
    @property
    def locale(self):
        """Gets the locale of this PreferencesV20.  # noqa: E501
        :return: The locale of this PreferencesV20.  # noqa: E501
        :rtype: str
        """
        return self._locale
    @locale.setter
    def locale(self, locale):
        """Sets the locale of this PreferencesV20.
        :param locale: The locale of this PreferencesV20.  # noqa: E501
        :type: str
        """
        # validated against the enum declared in the ORCID swagger spec
        allowed_values = ["AR", "EN", "ES", "FR", "KO", "PT", "RU", "ZH_CN", "ZH_TW", "IT", "JA", "XX"]  # noqa: E501
        if locale not in allowed_values:
            raise ValueError(
                "Invalid value for `locale` ({0}), must be one of {1}"  # noqa: E501
                .format(locale, allowed_values)
            )
        self._locale = locale
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # recursively serialize nested models, lists and dicts of models
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # generated escape hatch for dict-derived models; a no-op here
        if issubclass(PreferencesV20, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PreferencesV20):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| {
"content_hash": "a1b5707e483811ae7033dff5105a23f4",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 119,
"avg_line_length": 29.51304347826087,
"alnum_prop": 0.5332940483205657,
"repo_name": "Royal-Society-of-New-Zealand/NZ-ORCID-Hub",
"id": "5be0777c6c0e8879b061c4aaa5467886e870940b",
"size": "3411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orcid_api_v3/models/preferences_v20.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20266"
},
{
"name": "Dockerfile",
"bytes": "3303"
},
{
"name": "HTML",
"bytes": "239338"
},
{
"name": "JavaScript",
"bytes": "2240"
},
{
"name": "Makefile",
"bytes": "600"
},
{
"name": "PLpgSQL",
"bytes": "2581"
},
{
"name": "Python",
"bytes": "7935510"
},
{
"name": "Shell",
"bytes": "12088"
}
],
"symlink_target": ""
} |
'''
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
class source:
    """Covenant scraper for onlinemovies.tube / watchonline.pro.

    NOTE(review): Python 2 only (urllib.urlencode, urlparse module).
    All scraping is best-effort: every failure path is silently swallowed
    and an empty result returned.
    """
    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['onlinemovies.tube', 'watchonline.pro']
        self.base_link = 'http://watchonline.pro'
    def movie(self, imdb, title, localtitle, aliases, year):
        """Pack movie identifiers into a urlencoded string (None on error)."""
        try:
            url = {'imdb': imdb, 'title': title, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Pack show identifiers into a urlencoded string (None on error)."""
        try:
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Extend a tvshow() url with episode specifics (None on error)."""
        try:
            if url == None: return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
            url = urllib.urlencode(url)
            return url
        except:
            return
    def sources(self, url, hostDict, hostprDict):
        """Scrape the site page for playable google-video stream links.

        Accepts either a packed query string (from movie/episode above) or
        a site-relative url. Returns a list of source dicts; empty on any
        failure.
        """
        try:
            sources = []
            if url == None: return sources
            if not str(url).startswith('http'):
                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
                if 'tvshowtitle' in data:
                    url = '%s/episode/%s-s%02de%02d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
                    year = re.findall('(\d{4})', data['premiered'])[0]
                    url = client.request(url, output='geturl')
                    if url == None: raise Exception()
                    r = client.request(url)
                    # Cross-check the page's year against the episode's
                    # premiere year to avoid same-title collisions.
                    y = client.parseDOM(r, 'span', attrs = {'class': 'date'})
                    y += [i for i in client.parseDOM(r, 'div', attrs = {'class': 'metadatac'}) if 'date' in i]
                    y = re.findall('(\d{4})', y[0])[0]
                    if not y == year: raise Exception()
                else:
                    #url = '%s/watch/%s-%s/' % (self.base_link, cleantitle.geturl(data['title']), data['year'])
                    url = '%s/%s-%s/' % (self.base_link, cleantitle.geturl(data['title']), data['year'])
                    url = client.request(url, output='geturl')
                    if url == None: raise Exception()
                    r = client.request(url)
            else:
                url = urlparse.urljoin(self.base_link, url)
                r = client.request(url)
            # Each embedded iframe may host a player page with stream urls.
            links = client.parseDOM(r, 'iframe', ret='src')
            for link in links:
                try:
                    url = link.replace('\/', '/')
                    url = client.replaceHTMLCodes(url)
                    url = 'http:' + url if url.startswith('//') else url
                    url = url.encode('utf-8')
                    if not '.php' in url: raise Exception()
                    r = client.request(url, timeout='10')
                    s = re.compile('<script>(.+?)</script>', re.DOTALL).findall(r)
                    for i in s:
                        # NOTE(review): jsunpack is never imported in this
                        # module, so this always raises NameError and is
                        # swallowed below -- packed scripts are ignored.
                        try: r += jsunpack.unpack(i)
                        except: pass
                    r = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')', r)
                    for i in r:
                        try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                        except: pass
                except:
                    pass
            return sources
        except:
            return sources
    def resolve(self, url):
        """Resolve a google-video url into its final playable stream url."""
        return directstream.googlepass(url)
| {
"content_hash": "0d113e4d44b4d635f164282bfc18bbf9",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 182,
"avg_line_length": 34.44604316546763,
"alnum_prop": 0.5167084377610693,
"repo_name": "TheWardoctor/Wardoctors-repo",
"id": "be6e2c096f958d2f3ea6dbb888151cf6f1f9df0f",
"size": "4829",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "script.module.uncoded/lib/resources/lib/sources/en/to_be_fixed/sitedown/onlinemovies.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3208"
},
{
"name": "JavaScript",
"bytes": "115722"
},
{
"name": "Python",
"bytes": "34405207"
},
{
"name": "Shell",
"bytes": "914"
}
],
"symlink_target": ""
} |
import functools
from django import http
from django.core.exceptions import PermissionDenied
from django.shortcuts import redirect
from olympia import amo
from olympia.access import acl
from olympia.addons.decorators import addon_view_factory
from olympia.addons.models import Addon
from olympia.amo.decorators import login_required
from olympia.constants import permissions
def dev_required(owner_for_post=False, allow_reviewers_for_read=False,
                 submitting=False, qs=Addon.objects.all):
    """Requires user to be add-on owner or admin.

    When allow_reviewers_for_read is True, reviewers can view the page.
    """
    def make_decorator(func):
        @addon_view_factory(qs=qs)
        @login_required
        @functools.wraps(func)
        def inner(request, addon, *args, **kw):
            def run_view():
                return func(request, addon_id=addon.id, addon=addon,
                            *args, **kw)

            method = request.method
            if method in ('HEAD', 'GET'):
                # Reviewers may read the page when explicitly allowed.
                if allow_reviewers_for_read and (
                        acl.is_reviewer(request, addon) or acl.action_allowed(
                            request, permissions.REVIEWER_TOOLS_VIEW)):
                    return run_view()
                # Developers can still read their own add-on even while it
                # is disabled.
                if acl.check_addon_ownership(request, addon, dev=True,
                                             ignore_disabled=True):
                    # Unfinished submissions bounce back into the submit
                    # flow first.
                    if (not submitting and
                            addon.should_redirect_to_submit_flow()):
                        return redirect('devhub.submit.details', addon.slug)
                    return run_view()
            elif method == 'POST':
                # Writes need an owner (or a dev unless owner_for_post); a
                # disabled add-on makes this check return False.
                if acl.check_addon_ownership(request, addon,
                                             dev=not owner_for_post):
                    return run_view()
            raise PermissionDenied
        return inner

    # Support bare @dev_required usage: the decorated function itself then
    # arrives in owner_for_post.
    if callable(owner_for_post):
        func, owner_for_post = owner_for_post, False
        return make_decorator(func)
    return make_decorator
def no_admin_disabled(f):
    """Requires the addon not be STATUS_DISABLED (mozilla admin disabled)."""
    @functools.wraps(f)
    def inner(*args, **kw):
        target = kw.get('addon')
        # Only addons flagged as admin-disabled 404; a missing/falsy addon
        # kwarg passes straight through.
        if target and target.status == amo.STATUS_DISABLED:
            raise http.Http404()
        return f(*args, **kw)
    return inner
| {
"content_hash": "3283334f8ee0e82af357ca6ac1a6d594",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 78,
"avg_line_length": 39.2463768115942,
"alnum_prop": 0.5912112259970458,
"repo_name": "eviljeff/olympia",
"id": "d3f714ef978a50412f0f6c6daf2cf30ad5ef7574",
"size": "2708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/devhub/decorators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "251925"
},
{
"name": "Dockerfile",
"bytes": "4063"
},
{
"name": "HTML",
"bytes": "314372"
},
{
"name": "JavaScript",
"bytes": "865804"
},
{
"name": "Less",
"bytes": "307222"
},
{
"name": "Makefile",
"bytes": "564"
},
{
"name": "Python",
"bytes": "6146705"
},
{
"name": "Shell",
"bytes": "8000"
},
{
"name": "Smarty",
"bytes": "1413"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from .views import TestView
# Single test-app route: /test/ renders the class-based TestView.
urlpatterns = [
    url(r'^test/$', TestView.as_view())
]
| {
"content_hash": "9f02a689a0c2e38c0af36471fbdcc7e4",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 39,
"avg_line_length": 15.25,
"alnum_prop": 0.680327868852459,
"repo_name": "tarkatronic/django-excel-response",
"id": "170cecbdd55bff1146f82543912c80819c604ef9",
"size": "122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testapp/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18521"
}
],
"symlink_target": ""
} |
import os
import urllib
import djangoappengine.main.main
from google.appengine.ext import blobstore
from google.appengine.ext import webapp
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
class MainHandler(webapp.RequestHandler):
    """Serves an HTML form that uploads a file straight to the blobstore."""
    def get(self):
        # One-shot URL; App Engine rewrites the resulting POST so our
        # /img/upload handler receives the already-stored blob.
        upload_url = blobstore.create_upload_url('/img/upload')
        self.response.out.write('<html><body>')
        self.response.out.write('<form action="%s" method="POST" enctype="multipart/form-data">' % upload_url)
        self.response.out.write("""Upload File: <input type="file" name="file"><br> <input type="submit"
            name="submit" value="Submit"> </form></body></html>""")
class UploadHandler(blobstore_handlers.BlobstoreUploadHandler):
    """Handles the blobstore callback after a file upload completes."""
    def post(self):
        # 'file' is the name of the file-upload field in the form.
        uploads = self.get_uploads('file')
        first_blob = uploads[0]
        self.redirect('/img/serve/%s' % first_blob.key())
class ServeHandler(blobstore_handlers.BlobstoreDownloadHandler):
    """Streams a stored blob back to the client, addressed by blob key."""
    def get(self, resource):
        # The blob key arrives URL-encoded in the path segment.
        blob_key = str(urllib.unquote(resource))
        self.send_blob(blobstore.BlobInfo.get(blob_key))
def main():
    """Wire up the /img/* routes and hand the app to the WSGI runner."""
    routes = [
        ('/img/', MainHandler),
        ('/img/upload', UploadHandler),
        ('/img/serve/([^/]+)?', ServeHandler),
    ]
    run_wsgi_app(webapp.WSGIApplication(routes, debug=True))
# Script entry point (App Engine CGI style).
if __name__ == '__main__':
    main()
| {
"content_hash": "33452492945e982892d9bc0e8722a310",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 110,
"avg_line_length": 38.4,
"alnum_prop": 0.6731770833333334,
"repo_name": "GoSteven/Diary",
"id": "b33b09bdea011f0561bbcc84a99b9076602134c7",
"size": "1561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "img_blobstore.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "96307"
},
{
"name": "Python",
"bytes": "4274191"
}
],
"symlink_target": ""
} |
from flask import Flask, render_template, url_for
import json
import os.path
import locale
app = Flask(__name__)
# Languages this site has content for; anything else renders the 404 page.
supported_languages = set(['pt', 'en', 'de'])
# Template path pattern, filled per-request with language + page name.
page_file = 'pages/%(lang)s/%(page)s.html'
@app.route('/')
@app.route('/<language>/')
def route_home(language='en'):
    """Home page: the post index for the requested language."""
    if language not in supported_languages:
        return render_page('en', '404', code=404)
    posts = read_json('posts.json')['posts'][language]
    context = {'posts': posts, 'format_datetime': format_datetime}
    return render_page(language, 'index', context)
# TODO: we should really create "catchers" for each param here instead of
# checking them inside the function
@app.route('/<language>/<page_uri>.html')
def route_page(language, page_uri):
    """Render a static content page; all validation lives in render_page."""
    return render_page(language, page_uri)
def render_page(language, page_uri, vars=None, code=200):
    """Render a content page, falling back to the 404 page when invalid.

    Bug fix: the original signature used a mutable default (``vars={}``)
    and then mutated it below, so every call that omitted *vars* shared
    (and polluted) a single dict across requests.

    :param language: two-letter language code; must be in supported_languages.
    :param page_uri: page name, restricted to [0-9a-z_-].
    :param vars: extra template variables (language/format_datetime/uri are
        added to it before rendering).
    :param code: HTTP status to return; forced to 404 on any failure.
    """
    import re as regexp

    if vars is None:
        vars = {}

    is_valid_name = regexp.search('^[0-9\-\_a-z]+$', page_uri)
    is_valid_language = language in supported_languages
    path = page_file % {'lang': language, 'page': page_uri}

    # Any invalid input (bad slug, unknown language, missing template)
    # degrades to the English 404 page.
    if (
        not is_valid_name or
        not is_valid_language or
        not file_exists('templates/' + path)
    ):
        path = '404.html'
        code = 404
        language = 'en'

    i18n = read_json('i18n/' + language + '.json')
    # must cast to string otherwise we'll have a
    # ValueError: too many values to unpack
    locale_lang = str(i18n['locale']) + '.utf8'
    locale.setlocale(locale.LC_TIME, locale_lang)

    vars['language'] = language
    vars['format_datetime'] = format_datetime
    #vars['uri'] = language + '/' + page_uri
    if page_uri == 'index':
        vars['uri'] = url_for('route_home', language=language)
    else:
        vars['uri'] = url_for('route_page', language=language, page_uri=page_uri)

    return render_template(path, lang=i18n, vars=vars), code
def create_slug(string):
    """Lowercase *string*, drop punctuation, dash the spaces, strip accents."""
    import unicodedata

    # TODO: those replaces should be arrays of characters, huh?
    for old, new in (('?', ''), ('!', ''), (' ', '-'), (',', '')):
        string = string.replace(old, new)
    string = string.lower()

    # Python 2: decode bytes, decompose accented chars (NFD) and drop the
    # combining marks (category 'Mn').
    string = unicode(string, 'utf-8')
    decomposed = unicodedata.normalize('NFD', string)
    return ''.join(c for c in decomposed if unicodedata.category(c) != 'Mn')
def read_json(path):
    """Parse the JSON file at *path*; any read/parse error is re-raised."""
    try:
        with open(path) as json_file:
            parsed = json.load(json_file)
    except Exception as error:
        raise error
    return parsed
def format_datetime(date, output_format, input_format='%Y-%m-%d'):
    """Re-format the *date* string from *input_format* into *output_format*."""
    from datetime import datetime
    parsed = datetime.strptime(date, input_format)
    return parsed.strftime(output_format)
# Using 'open' here to avoid a race condition. More on this:
# stackoverflow.com/questions/82831/how-do-i-check-if-a-file-exists-using-python
def file_exists(path):
    """Return True iff *path* can actually be opened for reading."""
    try:
        handle = open(path)
    except IOError:
        return False
    handle.close()
    return True
@app.errorhandler(404)
def error_not_found(error):
    """Flask 404 handler: render the English 404 page.

    Bug fix: the third positional argument of render_page() is *vars*, not
    *code* -- passing 404 positionally made render_page crash when doing
    ``vars['language'] = ...``. Pass it as the ``code`` keyword instead,
    matching the identical call in route_home.
    """
    return render_page('en', '404', code=404)
if __name__ == '__main__':
    # register the filter so templates can use |format_datetime
    app.jinja_env.filters['format_datetime'] = format_datetime
    #app.run(debug = True, host='0.0.0.0')
    app.run()
"content_hash": "8748d7a6df4c27b80ecbd92831e85cba",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 85,
"avg_line_length": 29.357142857142858,
"alnum_prop": 0.6027980535279805,
"repo_name": "rhpaiva/rhpaiva.com",
"id": "d6f0b6f95b6c4105c352fe2d4d7bc4fa920f7b3d",
"size": "3288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "53204"
},
{
"name": "JavaScript",
"bytes": "5276"
},
{
"name": "Python",
"bytes": "6751"
},
{
"name": "Shell",
"bytes": "60"
}
],
"symlink_target": ""
} |
import smtplib
from django.http.response import HttpResponseRedirect
from django.shortcuts import render
from django import forms
class ContactForm(forms.Form):
    """Contact-us form; all three fields are required."""
    name = forms.CharField(label='Name *', max_length=50)
    email = forms.EmailField(label='Email *', max_length=50)
    message = forms.CharField(label='Message *', max_length=4000, widget=forms.Textarea)
def contact(request):
    """Contact-us view: validate the form and email the site owners."""
    if request.method == 'POST':
        from django.core.mail import send_mail
        form = ContactForm(request.POST)
        if form.is_valid():
            name = form.cleaned_data['name']
            sender = form.cleaned_data['email']
            subject = "Contact Us Form"
            message = "From: " + name + " (" + sender + ")\n\n"
            message += form.cleaned_data['message']
            recipients = ['codeepy@gmail.com']
            result = "Your message has been delivered. Thank you for contacting SHARE.IT!!"
            try:
                send_mail(subject, message, sender, recipients)
            except smtplib.SMTPException as exc:
                # Bug fix: the original read `smtplib.SMTPException.message`,
                # i.e. an attribute descriptor on the exception *class*, not
                # the message of the exception that was actually raised.
                result = str(exc)
            return render(request, "contact.html", {"result": result, "style": "display: block"})
        else:
            return render(request, "contact.html", {"result": "Failed to send the message. Please validate your data.",
                                                    "style": "display: block"})
    elif request.method == 'GET':
        return render(request, "contact.html", {"style": "display: none"})
def about(request):
    """Static about page."""
    return render(request, "about.html")
def home(request):
    """Static landing page."""
    return render(request, "home.html")
def api(request):
    """Static API documentation page."""
    return render(request, "api.html")
class DonationForm(forms.Form):
    """Donation form; amount is a decimal with two fraction digits."""
    name = forms.CharField(label='Name *', max_length=50)
    email = forms.EmailField(label='Email *', max_length=50)
    amount = forms.DecimalField(label='Amount *', max_digits=10, decimal_places=2)
def donation(request):
    """Donation view: validate the form, then redirect to JustGiving.

    JustGiving redirects back to this view (exitUrl) with name/email in
    the query string, which triggers the thank-you message on GET.

    Bug fix: the redirect URL contained the mojibake "¤cy=" where
    "&currency=" was intended -- the "&curren" run had been collapsed
    into the HTML entity for the currency sign (¤).
    """
    if request.method == 'POST':
        form = DonationForm(request.POST)
        if form.is_valid():
            name = form.cleaned_data['name']
            email = form.cleaned_data['email']
            amount = form.cleaned_data['amount']
            return HttpResponseRedirect("http://www.justgiving.com/4w350m3/donation/direct/charity/2050?amount=" +
                                        str(amount) + "&currency=GBP&reference=shareit" +
                                        "&exitUrl=http%3A%2F%2Fdumbastic.koding.io%2Fdonation%2F%3Fname%3D" + name +
                                        "%26email%3D" + email)
        else:
            return render(request, "donation.html", {"result": "Failed to send the message. Please validate your data.",
                                                     "style": "display: block"})
    elif request.method == 'GET':
        name = request.GET.get('name', '')
        email = request.GET.get('email', '')
        if name != '' and email != '':
            return render(request, "donation.html", {"result": "Your money has been donated. Thank you for your generosity.",
                                                     "style": "display: block"})
        else:
            return render(request, "donation.html", {"style": "display: none"})
def login(request):
    """Static login page."""
    return render(request, "login.html")
def register(request):
    """Static registration page."""
    return render(request, "register.html")
def map(request):
    """Static map page."""
    return render(request, "map.html")
| {
"content_hash": "13a2a6a2819d838b47fb736f8a6d76cd",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 125,
"avg_line_length": 40.96470588235294,
"alnum_prop": 0.5792647903503734,
"repo_name": "Codeepy/Share.it",
"id": "28f85a19b1e09a9938b4e639c1a9692ae47bb10c",
"size": "3482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "foodbank/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "157213"
},
{
"name": "HTML",
"bytes": "85883"
},
{
"name": "JavaScript",
"bytes": "259519"
},
{
"name": "Python",
"bytes": "30188"
}
],
"symlink_target": ""
} |
import git
import click
import json
import os
import shutil
import sys
import datetime
import time
REPOS_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".repos"))
class RepoSettingsRepository(object):
    """A single tracked repository: its name, local clone path and url."""

    @classmethod
    def from_dict(cls, data):
        """Build an instance from a plain dict; unknown keys are ignored."""
        return cls(**data)

    def __init__(self, name, path, url, **kwargs):
        # **kwargs tolerates extra keys coming from older settings files.
        self.name = name
        self.path = path
        self.url = url

    def to_dict(self):
        """Serializable representation (the inverse of from_dict)."""
        return dict(name=self.name, path=self.path, url=self.url)
class RepoSettings(object):
    """The whole settings document: a list of RepoSettingsRepository."""

    @classmethod
    def from_dict(cls, data):
        """Build settings from a plain dict with a "repos" list."""
        entries = [RepoSettingsRepository.from_dict(item)
                   for item in data["repos"]]
        return cls(entries)

    def __init__(self, repos=None):
        # Normalize None here rather than using a shared mutable default.
        self.repos = [] if repos is None else repos

    def to_dict(self):
        """Serializable representation (the inverse of from_dict)."""
        return {"repos": [entry.to_dict() for entry in self.repos]}
def get_settings():
    """Load repos.json; fall back to empty settings if missing or corrupt."""
    try:
        with open("repos.json", "r") as handle:
            return RepoSettings.from_dict(json.load(handle))
    except (IOError, ValueError):
        return RepoSettings()
def save_settings(settings):
    """Persist *settings* to repos.json in the current working directory."""
    with open("repos.json", "w") as handle:
        json.dump(settings.to_dict(), handle)
# CLI root group; no docstring on purpose (click would surface it as help).
@gitactivity.command-free comment removed
@gitactivity.command()
def fetch():
    # Clone any repo that is missing locally, then fetch all remotes of
    # every tracked repo. (Comment, not docstring: click shows docstrings
    # as command help.)
    settings = get_settings()
    for meta in settings.repos:
        if os.path.exists(meta.path):
            repo = git.repo.Repo(meta.path)
        else:
            repo = git.repo.Repo.clone_from(meta.url, meta.path)
        for remote in repo.remotes:
            remote.fetch()
@gitactivity.command()
def list():
    # Print "name from url" for every tracked repo. NOTE(review): shadows
    # the builtin `list`, but click derives the CLI command name from the
    # function name, so renaming would change the interface.
    settings = get_settings()
    for repo in settings.repos:
        print "{} from {}".format(repo.name, repo.url)
@gitactivity.command()
@click.argument("name")
@click.argument("url")
def add(name, url):
    # Register a new repo under REPOS_DIR/<name> and persist the settings.
    settings = get_settings()
    clone_path = os.path.join(REPOS_DIR, name)
    entry = RepoSettingsRepository(name, clone_path, url)
    settings.repos.append(entry)
    save_settings(settings)
@gitactivity.command()
@click.argument("name")
def delete(name):
    # Remove the repo called <name> from settings and delete its clone.
    # Bug fixes: (1) the settings object exposes `.repos` (plural); the
    # original iterated `.repo`, raising AttributeError before anything
    # could be deleted. (2) the removal was never persisted, so the entry
    # would reappear (and re-clone) on the next command.
    settings = get_settings()
    repo_to_delete = None
    for repo in settings.repos:
        if repo.name == name:
            repo_to_delete = repo
            break
    else:
        print("Cannot delete repo with name {}".format(name))
        sys.exit(1)
    settings.repos.remove(repo_to_delete)
    save_settings(settings)
    shutil.rmtree(repo_to_delete.path)
@gitactivity.command()
def summarize():
    # Print every commit from roughly the last week across all tracked
    # repos, oldest first. (Python 2 only: print statements and the
    # tuple-unpacking lambda near the bottom.)
    settings = get_settings()
    commits_in_range = set()
    now = datetime.datetime.now()
    for repo_meta in settings.repos:
        commits = set()
        repo = git.repo.Repo(repo_meta.path)
        # Walk every ref so commits on side branches are included too.
        for ref in repo.refs:
            for commit in repo.iter_commits(ref):
                commits.add(commit)
        for commit in sorted(commits, key=lambda c: c.authored_date):
            authored_dt = datetime.datetime.fromtimestamp(commit.authored_date)
            if now - authored_dt < datetime.timedelta(days=8):
                commits_in_range.add((repo_meta.name, commit))
    for repo, commit in sorted(commits_in_range, key=lambda (r,c): c.authored_date):
        print repo, "|", time.ctime(commit.authored_date), "|", commit.summary
# CLI entry point: dispatch to the click group.
if __name__ == "__main__":
    gitactivity()
| {
"content_hash": "1b904741848ab74371cc315b8a213234",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 84,
"avg_line_length": 24.640287769784173,
"alnum_prop": 0.5997080291970803,
"repo_name": "posborne/git-repohistory",
"id": "15c1dd2d77051f012840576efdb46f4a2e57e5e8",
"size": "3452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gitactivity.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3452"
}
],
"symlink_target": ""
} |
import pika
import time
def callback(ch, method, properties, body):
    # Consumer callback (Python 2): simulate 5s of work, then ack so
    # RabbitMQ can safely drop the message (at-least-once delivery).
    print 'received:%s' %(body, )
    time.sleep(5)
    print 'done'
    ch.basic_ack(delivery_tag = method.delivery_tag)
# Connect to a local broker and consume task_queue forever; blocks until
# interrupted with Ctrl+C.
conn = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = conn.channel()
channel.queue_declare(queue = 'task_queue')
channel.basic_consume(callback, queue = 'task_queue')
print "Ctrl + c to stop..."
channel.start_consuming()
| {
"content_hash": "2a48826f922624d147ac60009b47b201",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 70,
"avg_line_length": 19.47826086956522,
"alnum_prop": 0.7053571428571429,
"repo_name": "IvanJobs/play",
"id": "ad87d7c1a3265072a0a8d9f02ff1ed92951814a4",
"size": "471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rabbitmq/workqueues/worker.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4036"
},
{
"name": "C++",
"bytes": "5495"
},
{
"name": "Go",
"bytes": "4929"
},
{
"name": "JavaScript",
"bytes": "2622"
},
{
"name": "Python",
"bytes": "75354"
},
{
"name": "Shell",
"bytes": "1164"
}
],
"symlink_target": ""
} |
from django.shortcuts import render, redirect
from django.contrib.auth.models import User as User_model
from django.contrib.auth import login
from django.http import HttpResponse
#from django.template import Context
from maps import settings
from maps import forms
def getUserIdFromUserName(userName):
    """Resolve a username to its primary key.

    NOTE(review): an unknown name raises from objects.get -- the TODO
    below still applies.
    """
    user = User_model.objects.get(username=userName)
    #TODO error handling...
    return user.pk
# Create your views here.
def landing(request):
    """Landing page: session variant for logged-in users, base otherwise."""
    if not request.user.is_authenticated():
        return render(request, "landing-base.html")
    return render(request, "landing-session.html")
#Main entry to SPA
def profileViewer(request, userName):
    """SPA entry: render userName's profile; template varies with auth."""
    userId = getUserIdFromUserName(userName)
    #TODO check if user exists. Return error otherwise.
    ctx = {'userId': userId,
           'userName': userName,
           'mapboxToken': settings.mapboxToken,
           'mapboxMap': settings.mapboxMap}
    template = ("profile-base-session.html"
                if request.user.is_authenticated()
                else "profile-base.html")
    return render(request, template, context=ctx)
def editorViewer(request):
    """Map editor page; only needs the Mapbox credentials."""
    return render(request, "editor-base.html",
                  context={'mapboxToken': settings.mapboxToken,
                           'mapboxMap': settings.mapboxMap})
def registration(request):
    """Register a new user and immediately log them in.

    Fix: removed leftover debug prints (including profanity) that were
    emitted on every invalid submission.
    """
    if request.method == 'POST':
        form = forms.RegistrationForm(request.POST)
        if form.is_valid():
            newUser = form.save()
            # Attach a session to the freshly created account.
            login(request, newUser)
            return redirect("/")
        else:
            return HttpResponse("Registration error")
    else:
        form = forms.RegistrationForm()
    return render(request, "registration/registration_form.html", {'form': form})
| {
"content_hash": "8d28dce6a0705e6b21360d214ebe291d",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 158,
"avg_line_length": 32.85245901639344,
"alnum_prop": 0.6516966067864272,
"repo_name": "agilman/django-maps",
"id": "0714b8e6713ea9b21ac003727997c5383276c325",
"size": "2004",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "maps/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "113359"
},
{
"name": "HTML",
"bytes": "34272"
},
{
"name": "JavaScript",
"bytes": "4537"
},
{
"name": "Python",
"bytes": "9574"
}
],
"symlink_target": ""
} |
# Thin launcher: delegate straight to the package's server entry point.
from find_faces.server import serve
serve()
"content_hash": "0b501cabf8922865595ae045798823f8",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 35,
"avg_line_length": 14.666666666666666,
"alnum_prop": 0.7954545454545454,
"repo_name": "Dixneuf19/fuzzy-octo-disco",
"id": "8cf83642af744d0decf6ee99749b16569bd28a44",
"size": "44",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22388"
}
],
"symlink_target": ""
} |
"""
Scheduler Service
"""
from oslo.config import cfg
from oslo import messaging
from cinder import context
from cinder import db
from cinder import exception
from cinder import flow_utils
from cinder import manager
from cinder.openstack.common import excutils
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
from cinder import quota
from cinder import rpc
from cinder.scheduler.flows import create_volume
from cinder.volume import rpcapi as volume_rpcapi
# Config option letting deployers swap in another scheduler implementation.
scheduler_driver_opt = cfg.StrOpt('scheduler_driver',
                                  default='cinder.scheduler.filter_scheduler.'
                                          'FilterScheduler',
                                  help='Default scheduler driver to use')
CONF = cfg.CONF
CONF.register_opt(scheduler_driver_opt)
# Module-level handles for quota management and logging.
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
class SchedulerManager(manager.Manager):
"""Chooses a host to create volumes."""
RPC_API_VERSION = '1.5'
target = messaging.Target(version=RPC_API_VERSION)
def __init__(self, scheduler_driver=None, service_name=None,
*args, **kwargs):
if not scheduler_driver:
scheduler_driver = CONF.scheduler_driver
if scheduler_driver in ['cinder.scheduler.chance.ChanceScheduler',
'cinder.scheduler.simple.SimpleScheduler']:
scheduler_driver = ('cinder.scheduler.filter_scheduler.'
'FilterScheduler')
LOG.deprecated(_('ChanceScheduler and SimpleScheduler have been '
'deprecated due to lack of support for advanced '
'features like: volume types, volume encryption,'
' QoS etc. These two schedulers can be fully '
'replaced by FilterScheduler with certain '
'combination of filters and weighers.'))
self.driver = importutils.import_object(scheduler_driver)
super(SchedulerManager, self).__init__(*args, **kwargs)
def init_host(self):
ctxt = context.get_admin_context()
self.request_service_capabilities(ctxt)
def update_service_capabilities(self, context, service_name=None,
host=None, capabilities=None, **kwargs):
"""Process a capability update from a service node."""
if capabilities is None:
capabilities = {}
self.driver.update_service_capabilities(service_name,
host,
capabilities)
def create_volume(self, context, topic, volume_id, snapshot_id=None,
image_id=None, request_spec=None,
filter_properties=None):
try:
flow_engine = create_volume.get_flow(context,
db, self.driver,
request_spec,
filter_properties,
volume_id,
snapshot_id,
image_id)
except Exception:
LOG.exception(_("Failed to create scheduler manager volume flow"))
raise exception.CinderException(
_("Failed to create scheduler manager volume flow"))
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
def request_service_capabilities(self, context):
volume_rpcapi.VolumeAPI().publish_service_capabilities(context)
def migrate_volume_to_host(self, context, topic, volume_id, host,
force_host_copy, request_spec,
filter_properties=None):
"""Ensure that the host exists and can accept the volume."""
def _migrate_volume_set_error(self, context, ex, request_spec):
volume_state = {'volume_state': {'migration_status': None}}
self._set_volume_state_and_notify('migrate_volume_to_host',
volume_state,
context, ex, request_spec)
try:
tgt_host = self.driver.host_passes_filters(context, host,
request_spec,
filter_properties)
except exception.NoValidHost as ex:
_migrate_volume_set_error(self, context, ex, request_spec)
except Exception as ex:
with excutils.save_and_reraise_exception():
_migrate_volume_set_error(self, context, ex, request_spec)
else:
volume_ref = db.volume_get(context, volume_id)
volume_rpcapi.VolumeAPI().migrate_volume(context, volume_ref,
tgt_host,
force_host_copy)
def retype(self, context, topic, volume_id,
request_spec, filter_properties=None):
"""Schedule the modification of a volume's type.
:param context: the request context
:param topic: the topic listened on
:param volume_id: the ID of the volume to retype
:param request_spec: parameters for this retype request
:param filter_properties: parameters to filter by
"""
def _retype_volume_set_error(self, context, ex, request_spec,
volume_ref, msg, reservations):
if reservations:
QUOTAS.rollback(context, reservations)
if (volume_ref['instance_uuid'] is None and
volume_ref['attached_host'] is None):
orig_status = 'available'
else:
orig_status = 'in-use'
volume_state = {'volume_state': {'status': orig_status}}
self._set_volume_state_and_notify('retype', volume_state,
context, ex, request_spec, msg)
volume_ref = db.volume_get(context, volume_id)
reservations = request_spec.get('quota_reservations')
new_type = request_spec.get('volume_type')
if new_type is None:
msg = _('New volume type not specified in request_spec.')
ex = exception.ParameterNotFound(param='volume_type')
_retype_volume_set_error(self, context, ex, request_spec,
volume_ref, msg, reservations)
# Default migration policy is 'never'
migration_policy = request_spec.get('migration_policy')
if not migration_policy:
migration_policy = 'never'
try:
tgt_host = self.driver.find_retype_host(context, request_spec,
filter_properties,
migration_policy)
except exception.NoValidHost as ex:
msg = (_("Could not find a host for volume %(volume_id)s with "
"type %(type_id)s.") %
{'type_id': new_type['id'], 'volume_id': volume_id})
_retype_volume_set_error(self, context, ex, request_spec,
volume_ref, msg, reservations)
except Exception as ex:
with excutils.save_and_reraise_exception():
_retype_volume_set_error(self, context, ex, request_spec,
volume_ref, None, reservations)
else:
volume_rpcapi.VolumeAPI().retype(context, volume_ref,
new_type['id'], tgt_host,
migration_policy, reservations)
def manage_existing(self, context, topic, volume_id,
                    request_spec, filter_properties=None):
    """Ensure that the host exists and can accept the volume.

    Looks up the volume, asks the scheduler driver whether the
    volume's current host passes the request's filters, and on
    success hands the manage-existing operation to the volume
    service over RPC.  Any failure flips the volume to 'error'
    state and emits a scheduler error notification.
    """

    def _mark_error(ex):
        # Record the failure on the volume and notify listeners.
        state = {'volume_state': {'status': 'error'}}
        self._set_volume_state_and_notify('manage_existing', state,
                                          context, ex, request_spec)

    volume_ref = db.volume_get(context, volume_id)

    try:
        self.driver.host_passes_filters(context, volume_ref['host'],
                                        request_spec, filter_properties)
    except exception.NoValidHost as ex:
        # Expected scheduling failure: report it, do not re-raise.
        _mark_error(ex)
    except Exception as ex:
        # Unexpected failure: report it, then let it propagate.
        with excutils.save_and_reraise_exception():
            _mark_error(ex)
    else:
        volume_rpcapi.VolumeAPI().manage_existing(context, volume_ref,
                                                  request_spec.get('ref'))
def _set_volume_state_and_notify(self, method, updates, context, ex,
                                 request_spec, msg=None):
    """Persist a failed volume state and emit a scheduler error event.

    Logs *msg* (or a default built from *method* and *ex*), writes
    the state from ``updates['volume_state']`` to the volume when the
    request names one, and publishes a ``scheduler.<method>`` error
    notification carrying the request details.
    """
    # TODO(harlowja): move into a task that just does this later.
    if not msg:
        msg = (_("Failed to schedule_%(method)s: %(ex)s") %
               {'method': method, 'ex': ex})
    LOG.error(msg)

    new_state = updates['volume_state']

    vol_id = request_spec.get('volume_id', None)
    if vol_id:
        db.volume_update(context, vol_id, new_state)

    payload = {
        'request_spec': request_spec,
        'volume_properties': request_spec.get('volume_properties', {}),
        'volume_id': vol_id,
        'state': new_state,
        'method': method,
        'reason': ex,
    }
    rpc.get_notifier("scheduler").error(context,
                                        'scheduler.' + method,
                                        payload)
| {
"content_hash": "bbc9a73176810532e11f338dc18e6f01",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 78,
"avg_line_length": 45.34070796460177,
"alnum_prop": 0.5251293061383819,
"repo_name": "theanalyst/cinder",
"id": "663ef89777329ab6b49005aae3f97f039c860ecb",
"size": "11021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/scheduler/manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from statsmodels.compat.python import iterkeys, itervalues, zip, range
from statsmodels.stats.correlation_tools import cov_nearest
import numpy as np
import pandas as pd
from scipy import linalg as spl
from collections import defaultdict
from statsmodels.tools.sm_exceptions import (ConvergenceWarning, OutputWarning,
NotImplementedWarning)
import warnings
"""
Some details for the covariance calculations can be found in the Stata
docs:
http://www.stata.com/manuals13/xtxtgee.pdf
"""
class CovStruct(object):
    """
    A base class for correlation and covariance structures of grouped
    data.

    Each implementation of this class takes the residuals from a
    regression model that has been fitted to grouped data, and uses
    them to estimate the within-group dependence structure of the
    random errors in the model.

    The state of the covariance structure is represented through the
    value of the class variable `dep_params`.  The default state of a
    newly-created instance should correspond to the identity
    correlation matrix.
    """

    def __init__(self, cov_nearest_method="clipped"):

        # Parameters describing the dependency structure
        self.dep_params = None

        # Keep track of the number of times that the covariance was
        # adjusted.
        self.cov_adjust = []

        # Method for projecting the covariance matrix if it is not SPD.
        self.cov_nearest_method = cov_nearest_method

    def initialize(self, model):
        """
        Called by GEE, used by implementations that need additional
        setup prior to running `fit`.

        Parameters
        ----------
        model : GEE class
            A reference to the parent GEE class instance.
        """
        self.model = model

    def update(self, params):
        """
        Updates the association parameter values based on the current
        regression coefficients.

        Parameters
        ----------
        params : array-like
            Working values for the regression parameters.
        """
        raise NotImplementedError

    def covariance_matrix(self, endog_expval, index):
        """
        Returns the working covariance or correlation matrix for a
        given cluster of data.

        Parameters
        ----------
        endog_expval: array-like
            The expected values of endog for the cluster for which the
            covariance or correlation matrix will be returned
        index: integer
            The index of the cluster for which the covariance or
            correlation matrix will be returned

        Returns
        -------
        M: matrix
            The covariance or correlation matrix of endog
        is_cor: bool
            True if M is a correlation matrix, False if M is a
            covariance matrix
        """
        raise NotImplementedError

    def covariance_matrix_solve(self, expval, index, stdev, rhs):
        """
        Solves matrix equations of the form `covmat * soln = rhs` and
        returns the values of `soln`, where `covmat` is the covariance
        matrix represented by this class.

        Parameters
        ----------
        expval: array-like
            The expected value of endog for each observed value in the
            group.
        index: integer
            The group index.
        stdev : array-like
            The standard deviation of endog for each observation in
            the group.
        rhs : list/tuple of array-like
            A set of right-hand sides; each defines a matrix equation
            to be solved.

        Returns
        -------
        soln : list/tuple of array-like
            The solutions to the matrix equations.

        Notes
        -----
        Returns None if the solver fails.

        Some dependence structures do not use `expval` and/or `index`
        to determine the correlation matrix.  Some families
        (e.g. binomial) do not use the `stdev` parameter when forming
        the covariance matrix.

        If the covariance matrix is singular or not SPD, it is
        projected to the nearest such matrix.  These projection events
        are recorded in the fit_history member of the GEE model.

        Systems of linear equations with the covariance matrix as the
        left hand side (LHS) are solved for different right hand sides
        (RHS); the LHS is only factorized once to save time.

        This is a default implementation, it can be reimplemented in
        subclasses to optimize the linear algebra according to the
        structure of the covariance matrix.
        """

        vmat, is_cor = self.covariance_matrix(expval, index)
        if is_cor:
            # Rescale the correlation matrix by the marginal standard
            # deviations to obtain a covariance matrix.
            vmat *= np.outer(stdev, stdev)

        # Factor the covariance matrix.  If the factorization fails,
        # attempt to condition it into a factorizable matrix.
        threshold = 1e-2
        success = False
        cov_adjust = 0
        for itr in range(20):
            try:
                vco = spl.cho_factor(vmat)
                success = True
                break
            except np.linalg.LinAlgError:
                # Project to the nearest SPD matrix; double the
                # eigenvalue threshold after every failed attempt so
                # the projection becomes progressively more aggressive.
                vmat = cov_nearest(vmat, method=self.cov_nearest_method,
                                   threshold=threshold)
                threshold *= 2
                cov_adjust += 1

        # Record how many projections this solve required.
        self.cov_adjust.append(cov_adjust)

        # Last resort if we still can't factor the covariance matrix.
        if not success:
            warnings.warn(
                "Unable to condition covariance matrix to an SPD "
                "matrix using cov_nearest", ConvergenceWarning)
            # Fall back to the diagonal, which always factors.
            vmat = np.diag(np.diag(vmat))
            vco = spl.cho_factor(vmat)

        # Reuse the single Cholesky factorization for every RHS.
        soln = [spl.cho_solve(vco, x) for x in rhs]
        return soln

    def summary(self):
        """
        Returns a text summary of the current estimate of the
        dependence structure.
        """
        raise NotImplementedError
class Independence(CovStruct):
    """
    An independence working dependence structure.

    Observations within a cluster are treated as uncorrelated, so the
    working correlation matrix is the identity and there are no
    dependence parameters to estimate.
    """

    def update(self, params):
        # The identity structure has no parameters to estimate.
        return

    def covariance_matrix(self, expval, index):
        # Identity correlation matrix sized to the cluster.
        return np.eye(len(expval), dtype=np.float64), True

    def covariance_matrix_solve(self, expval, index, stdev, rhs):
        # With a diagonal covariance, solving reduces to elementwise
        # division by the variances.
        variance = stdev ** 2
        soln = []
        for mat in rhs:
            divisor = variance if mat.ndim == 1 else variance[:, None]
            soln.append(mat / divisor)
        return soln

    update.__doc__ = CovStruct.update.__doc__
    covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
    covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__

    def summary(self):
        return ("Observations within a cluster are modeled "
                "as being independent.")
class Exchangeable(CovStruct):
    """
    An exchangeable working dependence structure.

    Every pair of observations within a cluster shares a single common
    correlation parameter, stored in `dep_params`.
    """

    def __init__(self):

        super(Exchangeable, self).__init__()

        # The correlation between any two values in the same cluster
        self.dep_params = 0.

    def update(self, params):

        endog = self.model.endog_li

        nobs = self.model.nobs

        varfunc = self.model.family.variance

        cached_means = self.model.cached_means

        has_weights = self.model.weights is not None
        weights_li = self.model.weights

        # residsq_sum accumulates cross-products of residuals over
        # distinct within-cluster pairs; scale accumulates squared
        # residuals.  fsum1/fsum2 are the (weighted) observation and
        # pair counts used as denominators.
        residsq_sum, scale = 0, 0
        fsum1, fsum2, n_pairs = 0., 0., 0.
        for i in range(self.model.num_group):
            expval, _ = cached_means[i]
            stdev = np.sqrt(varfunc(expval))
            # Pearson residuals for this cluster.
            resid = (endog[i] - expval) / stdev
            f = weights_li[i] if has_weights else 1.

            ssr = np.sum(resid * resid)
            scale += f * ssr
            fsum1 += f * len(endog[i])

            # (sum r)^2 - sum r^2 counts each distinct pair's product
            # twice, hence the division by 2.
            residsq_sum += f * (resid.sum() ** 2 - ssr) / 2
            ngrp = len(resid)
            npr = 0.5 * ngrp * (ngrp - 1)
            fsum2 += f * npr
            n_pairs += npr

        # Apply the degrees-of-freedom correction to both the scale
        # and the pairwise denominator before forming the ratio.
        ddof = self.model.ddof_scale
        scale /= (fsum1 * (nobs - ddof) / float(nobs))
        residsq_sum /= scale
        self.dep_params = residsq_sum / \
            (fsum2 * (n_pairs - ddof) / float(n_pairs))

    def covariance_matrix(self, expval, index):
        # Constant off-diagonal correlation, unit diagonal.
        dim = len(expval)
        dp = self.dep_params * np.ones((dim, dim), dtype=np.float64)
        np.fill_diagonal(dp, 1)
        return dp, True

    def covariance_matrix_solve(self, expval, index, stdev, rhs):
        # Uses the closed form for the inverse of an exchangeable
        # correlation matrix (diagonal term plus a rank-one
        # correction), avoiding an explicit factorization.
        k = len(expval)
        c = self.dep_params / (1. - self.dep_params)
        c /= 1. + self.dep_params * (k - 1)

        rslt = []
        for x in rhs:
            if x.ndim == 1:
                x1 = x / stdev
                y = x1 / (1. - self.dep_params)
                y -= c * sum(x1)
                y /= stdev
            else:
                x1 = x / stdev[:, None]
                y = x1 / (1. - self.dep_params)
                y -= c * x1.sum(0)
                y /= stdev[:, None]
            rslt.append(y)

        return rslt

    update.__doc__ = CovStruct.update.__doc__
    covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
    covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__

    def summary(self):
        return ("The correlation between two observations in the " +
                "same cluster is %.3f" % self.dep_params)
class Nested(CovStruct):
    """
    A nested working dependence structure.

    A working dependence structure that captures a nested hierarchy of
    groups, each level of which contributes to the random error term
    of the model.

    When using this working covariance structure, `dep_data` of the
    GEE instance should contain a n_obs x k matrix of 0/1 indicators,
    corresponding to the k subgroups nested under the top-level
    `groups` of the GEE instance.  These subgroups should be nested
    from left to right, so that two observations with the same value
    for column j of `dep_data` should also have the same value for all
    columns j' < j (this only applies to observations in the same
    top-level cluster given by the `groups` argument to GEE).

    Examples
    --------
    Suppose our data are student test scores, and the students are in
    classrooms, nested in schools, nested in school districts.  The
    school district is the highest level of grouping, so the school
    district id would be provided to GEE as `groups`, and the school
    and classroom id's would be provided to the Nested class as the
    `dep_data` argument, e.g.

        0 0  # School 0, classroom 0, student 0
        0 0  # School 0, classroom 0, student 1
        0 1  # School 0, classroom 1, student 0
        0 1  # School 0, classroom 1, student 1
        1 0  # School 1, classroom 0, student 0
        1 0  # School 1, classroom 0, student 1
        1 1  # School 1, classroom 1, student 0
        1 1  # School 1, classroom 1, student 1

    Labels lower in the hierarchy are recycled, so that student 0 in
    classroom 0 is different from student 0 in classroom 1, etc.

    Notes
    -----
    The calculations for this dependence structure involve all pairs
    of observations within a group (that is, within the top level
    `group` structure passed to GEE).  Large group sizes will result
    in slow iterations.

    The variance components are estimated using least squares
    regression of the products r*r', for standardized residuals r and
    r' in the same group, on a vector of indicators defining which
    variance components are shared by r and r'.
    """

    def initialize(self, model):
        """
        Called on the first call to update

        `ilabels` is a list of n_i x n_i matrices containing integer
        labels that correspond to specific correlation parameters.
        Two elements of ilabels[i] with the same label share identical
        variance components.

        `designx` is a matrix, with each row containing dummy
        variables indicating which variance components are associated
        with the corresponding element of QY.
        """

        super(Nested, self).initialize(model)

        if self.model.weights is not None:
            warnings.warn("weights not implemented for nested cov_struct, "
                          "using unweighted covariance estimate",
                          NotImplementedWarning)

        # A bit of processing of the nest data
        id_matrix = np.asarray(self.model.dep_data)
        if id_matrix.ndim == 1:
            id_matrix = id_matrix[:, None]
        self.id_matrix = id_matrix

        endog = self.model.endog_li
        designx, ilabels = [], []

        # The number of layers of nesting
        n_nest = self.id_matrix.shape[1]

        for i in range(self.model.num_group):
            ngrp = len(endog[i])
            glab = self.model.group_labels[i]
            rix = self.model.group_indices[glab]

            # Determine the number of common variance components
            # shared by each pair of observations.  ix1/ix2 index the
            # strict lower triangle, i.e. all distinct pairs.
            ix1, ix2 = np.tril_indices(ngrp, -1)
            ncm = (self.id_matrix[rix[ix1], :] ==
                   self.id_matrix[rix[ix2], :]).sum(1)

            # This is used to construct the working correlation
            # matrix.  Off-diagonal labels are ncm+1; the diagonal
            # keeps label 0, reserved for the total variance.
            ilabel = np.zeros((ngrp, ngrp), dtype=np.int32)
            ilabel[[ix1, ix2]] = ncm + 1
            ilabel[[ix2, ix1]] = ncm + 1
            ilabels.append(ilabel)

            # This is used to estimate the variance components.  Each
            # row flags the intercept plus one dummy per shared level.
            dsx = np.zeros((len(ix1), n_nest + 1), dtype=np.float64)
            dsx[:, 0] = 1
            for k in np.unique(ncm):
                ii = np.flatnonzero(ncm == k)
                dsx[ii, 1:k + 1] = 1
            designx.append(dsx)

        self.designx = np.concatenate(designx, axis=0)
        self.ilabels = ilabels

        # Precompute the SVD so the least squares fit in update() is a
        # pair of matrix products.
        svd = np.linalg.svd(self.designx, 0)
        self.designx_u = svd[0]
        self.designx_s = svd[1]
        self.designx_v = svd[2].T

    def update(self, params):

        endog = self.model.endog_li

        nobs = self.model.nobs
        dim = len(params)

        # NOTE(review): _compute_design is not defined in this class;
        # initialize() always sets designx, so this branch appears
        # unreachable in normal use -- verify before relying on it.
        if self.designx is None:
            self._compute_design(self.model)

        cached_means = self.model.cached_means

        varfunc = self.model.family.variance

        # Cross-products of standardized residuals over all distinct
        # within-group pairs, stacked to align with self.designx.
        dvmat = []
        scale = 0.
        for i in range(self.model.num_group):

            expval, _ = cached_means[i]

            stdev = np.sqrt(varfunc(expval))
            resid = (endog[i] - expval) / stdev

            ix1, ix2 = np.tril_indices(len(resid), -1)
            dvmat.append(resid[ix1] * resid[ix2])

            scale += np.sum(resid ** 2)

        dvmat = np.concatenate(dvmat)
        scale /= (nobs - dim)

        # Use least squares regression to estimate the variance
        # components
        vcomp_coeff = np.dot(self.designx_v, np.dot(self.designx_u.T,
                             dvmat) / self.designx_s)

        # Variance components cannot be negative.
        self.vcomp_coeff = np.clip(vcomp_coeff, 0, np.inf)
        self.scale = scale

        self.dep_params = self.vcomp_coeff.copy()

    def covariance_matrix(self, expval, index):

        dim = len(expval)

        # First iteration
        if self.dep_params is None:
            return np.eye(dim, dtype=np.float64), True

        ilabel = self.ilabels[index]

        # c[0] is the total variance (diagonal); c[m] for m >= 1 is
        # the cumulative variance shared by pairs with m-1 common
        # nesting levels.  Dividing by scale yields a correlation
        # matrix (unit diagonal).
        c = np.r_[self.scale, np.cumsum(self.vcomp_coeff)]
        vmat = c[ilabel]
        vmat /= self.scale
        return vmat, True

    update.__doc__ = CovStruct.update.__doc__
    covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__

    def summary(self):
        """
        Returns a summary string describing the state of the
        dependence structure.
        """

        msg = "Variance estimates\n------------------\n"
        for k in range(len(self.vcomp_coeff)):
            msg += "Component %d: %.3f\n" % (k + 1, self.vcomp_coeff[k])
        msg += "Residual: %.3f\n" % (self.scale -
                                     np.sum(self.vcomp_coeff))
        return msg
class Stationary(CovStruct):
    """
    A stationary covariance structure.

    The correlation between two observations is an arbitrary function
    of the distance between them.  Distances up to a given maximum
    value are included in the covariance model.

    Parameters
    ----------
    max_lag : float
        The largest distance that is included in the covariance model.
    grid : bool
        If True, the index positions in the data (after dropping missing
        values) are used to define distances, and the `time` variable is
        ignored.
    """

    def __init__(self, max_lag=1, grid=False):

        super(Stationary, self).__init__()
        self.max_lag = max_lag
        self.grid = grid
        # One correlation parameter per lag 1..max_lag.
        self.dep_params = np.zeros(max_lag)

    def initialize(self, model):

        super(Stationary, self).initialize(model)

        # Time used as an index needs to be integer type.
        if not self.grid:
            time = self.model.time[:, 0].astype(np.int32)
            self.time = self.model.cluster_list(time)

    def update(self, params):

        if self.grid:
            self.update_grid(params)
        else:
            self.update_nogrid(params)

    def update_grid(self, params):
        # On a grid, lag-d products are read off directly from
        # adjacent positions in each cluster's residual vector.

        endog = self.model.endog_li
        cached_means = self.model.cached_means
        varfunc = self.model.family.variance

        dep_params = np.zeros(self.max_lag + 1)
        for i in range(self.model.num_group):

            expval, _ = cached_means[i]
            stdev = np.sqrt(varfunc(expval))
            resid = (endog[i] - expval) / stdev

            dep_params[0] += np.sum(resid * resid) / len(resid)
            for j in range(1, self.max_lag + 1):
                dep_params[j] += np.sum(resid[0:-j] *
                                        resid[j:]) / len(resid[j:])

        # Normalize each lag by the lag-0 term to get correlations.
        self.dep_params = dep_params[1:] / dep_params[0]

    def update_nogrid(self, params):
        # Without a grid, lags come from the time variable; products
        # are binned by distance with np.bincount.

        endog = self.model.endog_li
        cached_means = self.model.cached_means
        varfunc = self.model.family.variance

        dep_params = np.zeros(self.max_lag + 1)
        dn = np.zeros(self.max_lag + 1)
        for i in range(self.model.num_group):

            expval, _ = cached_means[i]
            stdev = np.sqrt(varfunc(expval))
            resid = (endog[i] - expval) / stdev

            j1, j2 = np.tril_indices(len(expval))
            dx = np.abs(self.time[i][j1] - self.time[i][j2])
            ii = np.flatnonzero(dx <= self.max_lag)
            j1 = j1[ii]
            j2 = j2[ii]
            dx = dx[ii]

            vs = np.bincount(dx, weights=resid[
                j1] * resid[j2], minlength=self.max_lag + 1)
            vd = np.bincount(dx, minlength=self.max_lag + 1)

            ii = np.flatnonzero(vd > 0)
            dn[ii] += 1
            if len(ii) > 0:
                dep_params[ii] += vs[ii] / vd[ii]

        # Average over groups, then normalize by the lag-0 term.
        dep_params /= dn
        self.dep_params = dep_params[1:] / dep_params[0]

    def covariance_matrix(self, endog_expval, index):

        if self.grid:
            return self.covariance_matrix_grid(endog_expval, index)

        # Build the correlation matrix from pairwise time distances,
        # filling only positions within max_lag of each other.
        j1, j2 = np.tril_indices(len(endog_expval))
        dx = np.abs(self.time[index][j1] - self.time[index][j2])
        ii = np.flatnonzero((0 < dx) & (dx <= self.max_lag))
        j1 = j1[ii]
        j2 = j2[ii]
        dx = dx[ii]

        cmat = np.eye(len(endog_expval))
        cmat[j1, j2] = self.dep_params[dx - 1]
        cmat[j2, j1] = self.dep_params[dx - 1]
        return cmat, True

    def covariance_matrix_grid(self, endog_expval, index):

        # On a grid the correlation matrix is Toeplitz: constant along
        # each diagonal, with the estimated lag correlations in the
        # first row/column.
        from scipy.linalg import toeplitz
        r = np.zeros(len(endog_expval))
        r[0] = 1
        r[1:self.max_lag + 1] = self.dep_params
        return toeplitz(r), True

    def covariance_matrix_solve(self, expval, index, stdev, rhs):

        if not self.grid:
            # No special structure to exploit; use the generic
            # Cholesky-based solver.
            return super(Stationary, self).covariance_matrix_solve(
                expval, index, stdev, rhs)

        from statsmodels.tools.linalg import stationary_solve
        r = np.zeros(len(expval))
        r[0:self.max_lag] = self.dep_params
        return [stationary_solve(r, x) for x in rhs]

    update.__doc__ = CovStruct.update.__doc__
    covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
    covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__

    def summary(self):
        # Fix: the previous version returned a (string, ndarray) tuple
        # because of a stray comma; every other covariance structure's
        # summary() returns a string, and callers format it as text.
        return ("Stationary dependence parameters\n%s" %
                self.dep_params)
class Autoregressive(CovStruct):
    """
    A first-order autoregressive working dependence structure.

    The dependence is defined in terms of the `time` component of the
    parent GEE class, which defaults to the index position of each
    value within its cluster, based on the order of values in the
    input data set.  Time represents a potentially multidimensional
    index from which distances between pairs of observations can be
    determined.

    The correlation between two observations in the same cluster is
    dep_params^distance, where `dep_params` contains the (scalar)
    autocorrelation parameter to be estimated, and `distance` is the
    distance between the two observations, calculated from their
    corresponding time values.  `time` is stored as an n_obs x k
    matrix, where `k` represents the number of dimensions in the time
    index.

    The autocorrelation parameter is estimated using weighted
    nonlinear least squares, regressing each value within a cluster on
    each preceding value in the same cluster.

    Parameters
    ----------
    dist_func: function from R^k x R^k to R^+, optional
        A function that computes the distance between the two
        observations based on their `time` values.

    References
    ----------
    B Rosner, A Munoz.  Autoregressive modeling for the analysis of
    longitudinal data with unequally spaced examinations.  Statistics
    in medicine. Vol 7, 59-71, 1988.
    """

    def __init__(self, dist_func=None):

        super(Autoregressive, self).__init__()

        # The function for determining distances based on time
        if dist_func is None:
            self.dist_func = lambda x, y: np.abs(x - y).sum()
        else:
            self.dist_func = dist_func

        self.designx = None

        # The autocorrelation parameter
        self.dep_params = 0.

    def update(self, params):

        if self.model.weights is not None:
            warnings.warn("weights not implemented for autoregressive "
                          "cov_struct, using unweighted covariance estimate",
                          NotImplementedWarning)

        endog = self.model.endog_li
        time = self.model.time_li

        # Only need to compute this once; the pairwise distances do
        # not depend on the regression parameters.
        if self.designx is not None:
            designx = self.designx
        else:
            designx = []
            for i in range(self.model.num_group):

                ngrp = len(endog[i])
                if ngrp == 0:
                    continue

                # Loop over pairs of observations within a cluster
                for j1 in range(ngrp):
                    for j2 in range(j1):
                        designx.append(self.dist_func(time[i][j1, :],
                                                      time[i][j2, :]))

            designx = np.array(designx)
            self.designx = designx

        scale = self.model.estimate_scale()
        varfunc = self.model.family.variance
        cached_means = self.model.cached_means

        # Weights: inverse of the residual-product variance implied by
        # the current autocorrelation estimate, normalized to sum to 1.
        var = 1. - self.dep_params ** (2 * designx)
        var /= 1. - self.dep_params ** 2
        wts = 1. / var
        wts /= wts.sum()

        # Stack the standardized residual pairs in the same order as
        # the distances in designx.
        residmat = []
        for i in range(self.model.num_group):

            expval, _ = cached_means[i]
            stdev = np.sqrt(scale * varfunc(expval))
            resid = (endog[i] - expval) / stdev

            ngrp = len(resid)
            for j1 in range(ngrp):
                for j2 in range(j1):
                    residmat.append([resid[j1], resid[j2]])

        residmat = np.array(residmat)

        # Need to minimize this
        def fitfunc(a):
            dif = residmat[:, 0] - (a ** designx) * residmat[:, 1]
            return np.dot(dif ** 2, wts)

        # Bracket the minimum for Brent's method: find b_lft < b_ctr
        # < b_rgt in [0, 1) with fitfunc(b_ctr) below both endpoints.

        # Left bracket point
        b_lft, f_lft = 0., fitfunc(0.)

        # Center bracket point: halve until it beats the left endpoint.
        b_ctr, f_ctr = 0.5, fitfunc(0.5)
        while f_ctr > f_lft:
            b_ctr /= 2
            f_ctr = fitfunc(b_ctr)
            if b_ctr < 1e-8:
                # No interior minimum found; treat as uncorrelated.
                self.dep_params = 0
                return

        # Right bracket point: move toward 1 until the function rises.
        b_rgt, f_rgt = 0.75, fitfunc(0.75)
        while f_rgt < f_ctr:
            b_rgt = b_rgt + (1. - b_rgt) / 2
            f_rgt = fitfunc(b_rgt)
            if b_rgt > 1. - 1e-6:
                raise ValueError(
                    "Autoregressive: unable to find right bracket")

        from scipy.optimize import brent
        self.dep_params = brent(fitfunc, brack=[b_lft, b_ctr, b_rgt])

    def covariance_matrix(self, endog_expval, index):
        ngrp = len(endog_expval)
        if self.dep_params == 0:
            return np.eye(ngrp, dtype=np.float64), True
        idx = np.arange(ngrp)
        # Correlation decays geometrically with index distance.
        cmat = self.dep_params ** np.abs(idx[:, None] - idx[None, :])
        return cmat, True

    def covariance_matrix_solve(self, expval, index, stdev, rhs):
        # The inverse of an AR(1) covariance matrix is tri-diagonal.

        k = len(expval)
        soln = []

        # LHS has 1 column
        if k == 1:
            return [x / stdev ** 2 for x in rhs]

        # LHS has 2 columns: use the explicit 2x2 inverse.
        if k == 2:
            mat = np.array([[1, -self.dep_params], [-self.dep_params, 1]])
            mat /= (1. - self.dep_params ** 2)
            for x in rhs:
                if x.ndim == 1:
                    x1 = x / stdev
                else:
                    x1 = x / stdev[:, None]
                x1 = np.dot(mat, x1)
                if x.ndim == 1:
                    x1 /= stdev
                else:
                    x1 /= stdev[:, None]
                soln.append(x1)
            return soln

        # LHS has >= 3 columns: values c0, c1, c2 defined below give
        # the inverse.  c0 is on the diagonal, except for the first
        # and last position.  c1 is on the first and last position of
        # the diagonal.  c2 is on the sub/super diagonal.
        c0 = (1. + self.dep_params ** 2) / (1. - self.dep_params ** 2)
        c1 = 1. / (1. - self.dep_params ** 2)
        c2 = -self.dep_params / (1. - self.dep_params ** 2)
        soln = []
        for x in rhs:
            flatten = False
            if x.ndim == 1:
                x = x[:, None]
                flatten = True
            x1 = x / stdev[:, None]

            # Apply the tri-diagonal inverse via shifted copies of x
            # rather than constructing the matrix.
            z0 = np.zeros((1, x.shape[1]))
            rhs1 = np.concatenate((x[1:, :], z0), axis=0)
            rhs2 = np.concatenate((z0, x[0:-1, :]), axis=0)

            y = c0 * x + c2 * rhs1 + c2 * rhs2
            y[0, :] = c1 * x[0, :] + c2 * x[1, :]
            y[-1, :] = c1 * x[-1, :] + c2 * x[-2, :]
            y /= stdev[:, None]

            if flatten:
                y = np.squeeze(y)

            soln.append(y)

        return soln

    update.__doc__ = CovStruct.update.__doc__
    covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
    covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__

    def summary(self):

        return ("Autoregressive(1) dependence parameter: %.3f\n" %
                self.dep_params)
class CategoricalCovStruct(CovStruct):
    """
    Parent class for covariance structure for categorical data models.

    Attributes
    ----------
    nlevel : int
        The number of distinct levels for the outcome variable.
    ibd : list
        A list whose i^th element ibd[i] is an array whose rows
        contain integer pairs (a,b), where endog_li[i][a:b] is the
        subvector of binary indicators derived from the same ordinal
        value.
    """

    def initialize(self, model):

        super(CategoricalCovStruct, self).initialize(model)

        self.nlevel = len(model.endog_values)
        self._ncut = self.nlevel - 1

        # Pair up consecutive cut points so that each row (a, b)
        # brackets the run of binary indicators derived from one
        # underlying observation.
        ibd = []
        for v in model.endog_li:
            cuts = np.arange(0, len(v) + 1, self._ncut, dtype=np.int64)
            ibd.append(np.column_stack((cuts[:-1], cuts[1:])))
        self.ibd = ibd
class GlobalOddsRatio(CategoricalCovStruct):
    """
    Estimate the global odds ratio for a GEE with ordinal or nominal
    data.

    References
    ----------
    PJ Heagerty and S Zeger. "Marginal Regression Models for Clustered
    Ordinal Measurements". Journal of the American Statistical
    Association Vol. 91, Issue 435 (1996).

    Thomas Lumley. Generalized Estimating Equations for Ordinal Data:
    A Note on Working Correlation Structures. Biometrics Vol. 52,
    No. 1 (Mar., 1996), pp. 354-361
    http://www.jstor.org/stable/2533173

    Notes
    -----
    The following data structures are calculated in the class:

    'ibd' is a list whose i^th element ibd[i] is a sequence of integer
    pairs (a,b), where endog_li[i][a:b] is the subvector of binary
    indicators derived from the same ordinal value.

    `cpp` is a dictionary where cpp[group] is a map from cut-point
    pairs (c,c') to the indices of all between-subject pairs derived
    from the given cut points.
    """

    def __init__(self, endog_type):
        super(GlobalOddsRatio, self).__init__()
        self.endog_type = endog_type
        self.dep_params = 0.

    def initialize(self, model):

        super(GlobalOddsRatio, self).initialize(model)

        if self.model.weights is not None:
            warnings.warn("weights not implemented for GlobalOddsRatio "
                          "cov_struct, using unweighted covariance estimate",
                          NotImplementedWarning)

        # Need to restrict to between-subject pairs
        cpp = []
        for v in model.endog_li:

            # Number of subjects in this group
            m = int(len(v) / self._ncut)
            i1, i2 = np.tril_indices(m, -1)

            # For each pair of cut points, collect the indicator-index
            # pairs over all distinct subject pairs in the group.
            cpp1 = {}
            for k1 in range(self._ncut):
                for k2 in range(k1 + 1):
                    jj = np.zeros((len(i1), 2), dtype=np.int64)
                    jj[:, 0] = i1 * self._ncut + k1
                    jj[:, 1] = i2 * self._ncut + k2
                    cpp1[(k2, k1)] = jj

            cpp.append(cpp1)

        self.cpp = cpp

        # Initialize the dependence parameters
        self.crude_or = self.observed_crude_oddsratio()
        if self.model.update_dep:
            self.dep_params = self.crude_or

    def pooled_odds_ratio(self, tables):
        """
        Returns the pooled odds ratio for a list of 2x2 tables.

        The pooled odds ratio is the inverse variance weighted average
        of the sample odds ratios of the tables.
        """

        if len(tables) == 0:
            return 1.

        # Get the sampled odds ratios and variances
        log_oddsratio, var = [], []
        for table in tables:
            lor = np.log(table[1, 1]) + np.log(table[0, 0]) -\
                np.log(table[0, 1]) - np.log(table[1, 0])
            log_oddsratio.append(lor)
            # Woolf variance estimate: sum of reciprocal cell counts.
            var.append((1 / table.astype(np.float64)).sum())

        # Calculate the inverse variance weighted average
        wts = [1 / v for v in var]
        wtsum = sum(wts)
        wts = [w / wtsum for w in wts]
        log_pooled_or = sum([w * e for w, e in zip(wts, log_oddsratio)])

        return np.exp(log_pooled_or)

    def covariance_matrix(self, expected_value, index):

        # Cov(Y_i, Y_j) = E[Y_i Y_j] - E[Y_i] E[Y_j].
        vmat = self.get_eyy(expected_value, index)
        vmat -= np.outer(expected_value, expected_value)
        return vmat, False

    def observed_crude_oddsratio(self):
        """
        To obtain the crude (global) odds ratio, first pool all binary
        indicators corresponding to a given pair of cut points (c,c'),
        then calculate the odds ratio for this 2x2 table.  The crude
        odds ratio is the inverse variance weighted average of these
        odds ratios.  Since the covariate effects are ignored, this OR
        will generally be greater than the stratified OR.
        """

        cpp = self.cpp
        endog = self.model.endog_li

        # Storage for the contingency tables for each (c,c')
        tables = {}
        for ii in iterkeys(cpp[0]):
            tables[ii] = np.zeros((2, 2), dtype=np.float64)

        # Get the observed crude OR
        for i in range(len(endog)):

            # The observed joint values for the current cluster
            yvec = endog[i]
            endog_11 = np.outer(yvec, yvec)
            endog_10 = np.outer(yvec, 1. - yvec)
            endog_01 = np.outer(1. - yvec, yvec)
            endog_00 = np.outer(1. - yvec, 1. - yvec)

            cpp1 = cpp[i]
            for ky in iterkeys(cpp1):
                ix = cpp1[ky]
                tables[ky][1, 1] += endog_11[ix[:, 0], ix[:, 1]].sum()
                tables[ky][1, 0] += endog_10[ix[:, 0], ix[:, 1]].sum()
                tables[ky][0, 1] += endog_01[ix[:, 0], ix[:, 1]].sum()
                tables[ky][0, 0] += endog_00[ix[:, 0], ix[:, 1]].sum()

        return self.pooled_odds_ratio(list(itervalues(tables)))

    def get_eyy(self, endog_expval, index):
        """
        Returns a matrix V such that V[i,j] is the joint probability
        that endog[i] = 1 and endog[j] = 1, based on the marginal
        probabilities of endog and the global odds ratio `current_or`.
        """

        current_or = self.dep_params
        ibd = self.ibd[index]

        # The between-observation joint probabilities
        if current_or == 1.0:
            # Odds ratio 1 means independence.
            vmat = np.outer(endog_expval, endog_expval)
        else:
            # Closed-form joint probability for two binary margins
            # with a common odds ratio (root of a quadratic).
            psum = endog_expval[:, None] + endog_expval[None, :]
            pprod = endog_expval[:, None] * endog_expval[None, :]
            pfac = np.sqrt((1. + psum * (current_or - 1.)) ** 2 +
                           4 * current_or * (1. - current_or) * pprod)
            vmat = 1. + psum * (current_or - 1.) - pfac
            vmat /= 2. * (current_or - 1)

        # Fix E[YY'] for elements that belong to same observation
        for bdl in ibd:
            evy = endog_expval[bdl[0]:bdl[1]]
            if self.endog_type == "ordinal":
                vmat[bdl[0]:bdl[1], bdl[0]:bdl[1]] =\
                    np.minimum.outer(evy, evy)
            else:
                vmat[bdl[0]:bdl[1], bdl[0]:bdl[1]] = np.diag(evy)

        return vmat

    def update(self, params):
        """
        Update the global odds ratio based on the current value of
        params.
        """

        cpp = self.cpp
        cached_means = self.model.cached_means

        # This will happen if all the clusters have only
        # one observation
        if len(cpp[0]) == 0:
            return

        tables = {}
        for ii in cpp[0]:
            tables[ii] = np.zeros((2, 2), dtype=np.float64)

        for i in range(self.model.num_group):

            endog_expval, _ = cached_means[i]

            # Expected 2x2 cell probabilities under the current model.
            emat_11 = self.get_eyy(endog_expval, i)
            emat_10 = endog_expval[:, None] - emat_11
            emat_01 = -emat_11 + endog_expval
            emat_00 = 1. - (emat_11 + emat_10 + emat_01)

            cpp1 = cpp[i]
            for ky in iterkeys(cpp1):
                ix = cpp1[ky]
                tables[ky][1, 1] += emat_11[ix[:, 0], ix[:, 1]].sum()
                tables[ky][1, 0] += emat_10[ix[:, 0], ix[:, 1]].sum()
                tables[ky][0, 1] += emat_01[ix[:, 0], ix[:, 1]].sum()
                tables[ky][0, 0] += emat_00[ix[:, 0], ix[:, 1]].sum()

        cor_expval = self.pooled_odds_ratio(list(itervalues(tables)))

        # Recalibrate toward the observed crude odds ratio.
        self.dep_params *= self.crude_or / cor_expval
        if not np.isfinite(self.dep_params):
            self.dep_params = 1.
            warnings.warn("dep_params became inf, resetting to 1",
                          ConvergenceWarning)

    update.__doc__ = CovStruct.update.__doc__
    covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__

    def summary(self):
        return "Global odds ratio: %.3f\n" % self.dep_params
class OrdinalIndependence(CategoricalCovStruct):
    """
    An independence covariance structure for ordinal models.

    The working covariance between indicators derived from different
    observations is zero.  The working covariance between indicators
    derived from a common observation is determined from their
    current mean values.

    There are no parameters to estimate in this covariance structure.
    """

    def covariance_matrix(self, expected_value, index):

        n = len(expected_value)
        vmat = np.zeros((n, n))

        # Fill one diagonal block per underlying observation; all
        # cross-observation covariances stay zero.
        for start, stop in self.ibd[index]:
            ev = expected_value[start:stop]
            # min(p_j, p_k) - p_j * p_k, computed from current means.
            vmat[start:stop, start:stop] = (np.minimum.outer(ev, ev) -
                                            np.outer(ev, ev))

        return vmat, False

    def update(self, params):
        # Nothing to update
        pass
class NominalIndependence(CategoricalCovStruct):
    """
    An independence covariance structure for nominal models.

    The working covariance between indicators derived from different
    observations is zero.  The working covariance between indicators
    derived from a common observation is determined from their
    current mean values.

    There are no parameters to estimate in this covariance structure.
    """

    def covariance_matrix(self, expected_value, index):

        n = len(expected_value)
        vmat = np.zeros((n, n))

        # Fill one diagonal block per underlying observation; all
        # cross-observation covariances stay zero.
        for start, stop in self.ibd[index]:
            ev = expected_value[start:stop]
            # diag(p) - p p', computed from current means.
            vmat[start:stop, start:stop] = (np.diag(ev) -
                                            np.outer(ev, ev))

        return vmat, False

    def update(self, params):
        # Nothing to update
        pass
class Equivalence(CovStruct):
    """
    A covariance structure defined in terms of equivalence classes.

    An 'equivalence class' is a set of pairs of observations such that
    the covariance of every pair within the equivalence class has a
    common value.

    Parameters
    ----------
    pairs : dict-like
        A dictionary of dictionaries, where `pairs[group][label]`
        provides the indices of all pairs of observations in the group
        that have the same covariance value. Specifically,
        `pairs[group][label]` is a tuple `(j1, j2)`, where `j1` and `j2`
        are integer arrays of the same length. `j1[i], j2[i]` is one
        index pair that belongs to the `label` equivalence class. Only
        one triangle of each covariance matrix should be included.
        Positions where j1 and j2 have the same value are variance
        parameters.
    labels : array-like
        An array of labels such that every distinct pair of labels
        defines an equivalence class. Either `labels` or `pairs` must
        be provided. When the two labels in a pair are equal two
        equivalence classes are defined: one for the diagonal elements
        (corresponding to variances) and one for the off-diagonal
        elements (corresponding to covariances).
    return_cov : boolean
        If True, `covariance_matrix` returns an estimate of the
        covariance matrix, otherwise returns an estimate of the
        correlation matrix.

    Notes
    -----
    Using `labels` to define the class is much easier than using
    `pairs`, but is less general.

    Any pair of values not contained in `pairs` will be assigned zero
    covariance.

    The index values in `pairs` are row indices into the `exog`
    matrix. They are not updated if missing data are present. When
    using this covariance structure, missing data should be removed
    before constructing the model.

    If using `labels`, after a model is defined using the covariance
    structure it is possible to remove a label pair from the second
    level of the `pairs` dictionary to force the corresponding
    covariance to be zero.

    Examples
    --------
    The following sets up the `pairs` dictionary for a model with two
    groups, equal variance for all observations, and constant
    covariance for all pairs of observations within each group.

    >> pairs = {0: {}, 1: {}}
    >> pairs[0][0] = (np.r_[0, 1, 2], np.r_[0, 1, 2])
    >> pairs[0][1] = np.tril_indices(3, -1)
    >> pairs[1][0] = (np.r_[3, 4, 5], np.r_[3, 4, 5])
    >> pairs[1][2] = 3 + np.tril_indices(3, -1)
    """
    def __init__(self, pairs=None, labels=None, return_cov=False):
        super(Equivalence, self).__init__()
        # Exactly one of `pairs` and `labels` must be given.
        if (pairs is None) and (labels is None):
            raise ValueError(
                "Equivalence cov_struct requires either `pairs` or `labels`")
        if (pairs is not None) and (labels is not None):
            raise ValueError(
                "Equivalence cov_struct accepts only one of `pairs` "
                "and `labels`")
        if pairs is not None:
            import copy
            # Deep copy: `initialize` reindexes pairs in place, and the
            # caller's dictionary must not be mutated.
            self.pairs = copy.deepcopy(pairs)
        if labels is not None:
            self.labels = np.asarray(labels)
        self.return_cov = return_cov
    def _make_pairs(self, i, j):
        """
        Create arrays containing all unique ordered pairs of i, j.

        The arrays i and j must be one-dimensional containing non-negative
        integers.
        """
        mat = np.zeros((len(i) * len(j), 2), dtype=np.int32)
        # Create the pairs and order them
        f = np.ones(len(j))
        mat[:, 0] = np.kron(f, i).astype(np.int32)
        f = np.ones(len(i))
        mat[:, 1] = np.kron(j, f).astype(np.int32)
        mat.sort(1)
        # Remove repeated rows
        try:
            dtype = np.dtype((np.void, mat.dtype.itemsize * mat.shape[1]))
            bmat = np.ascontiguousarray(mat).view(dtype)
            _, idx = np.unique(bmat, return_index=True)
        except TypeError:
            # workaround for old numpy that can't call unique with complex
            # dtypes
            rs = np.random.RandomState(4234)
            bmat = np.dot(mat, rs.uniform(size=mat.shape[1]))
            _, idx = np.unique(bmat, return_index=True)
        mat = mat[idx, :]
        return mat[:, 0], mat[:, 1]
    def _pairs_from_labels(self):
        """Build the `pairs` dictionary from the `labels` array."""
        from collections import defaultdict
        pairs = defaultdict(lambda: defaultdict(lambda: None))
        model = self.model
        df = pd.DataFrame({"labels": self.labels, "groups": model.groups})
        gb = df.groupby(["groups", "labels"])
        ulabels = np.unique(self.labels)
        for g_ix, g_lb in enumerate(model.group_labels):
            # Loop over label pairs
            for lx1 in range(len(ulabels)):
                for lx2 in range(lx1 + 1):
                    lb1 = ulabels[lx1]
                    lb2 = ulabels[lx2]
                    try:
                        i1 = gb.groups[(g_lb, lb1)]
                        i2 = gb.groups[(g_lb, lb2)]
                    except KeyError:
                        continue
                    i1, i2 = self._make_pairs(i1, i2)
                    clabel = str(lb1) + "/" + str(lb2)
                    # Variance parameters belong in their own equiv class.
                    jj = np.flatnonzero(i1 == i2)
                    if len(jj) > 0:
                        clabelv = clabel + "/v"
                        pairs[g_lb][clabelv] = (i1[jj], i2[jj])
                    # Covariance parameters
                    jj = np.flatnonzero(i1 != i2)
                    if len(jj) > 0:
                        i1 = i1[jj]
                        i2 = i2[jj]
                        pairs[g_lb][clabel] = (i1, i2)
        self.pairs = pairs
    def initialize(self, model):
        super(Equivalence, self).initialize(model)
        if self.model.weights is not None:
            # Fixed typo in user-facing message: "equalence" -> "equivalence".
            warnings.warn("weights not implemented for equivalence "
                          "cov_struct, using unweighted covariance estimate",
                          NotImplementedWarning)
        if not hasattr(self, 'pairs'):
            self._pairs_from_labels()
        # Initialize so that any equivalence class containing a
        # variance parameter has value 1.
        self.dep_params = defaultdict(lambda: 0.)
        self._var_classes = set([])
        for gp in self.model.group_labels:
            for lb in self.pairs[gp]:
                j1, j2 = self.pairs[gp][lb]
                if np.any(j1 == j2):
                    if not np.all(j1 == j2):
                        warnings.warn(
                            "equivalence class contains both variance "
                            "and covariance parameters", OutputWarning)
                    self._var_classes.add(lb)
                    self.dep_params[lb] = 1
        # Need to start indexing at 0 within each group.
        # rx maps old indices to new indices
        rx = -1 * np.ones(len(self.model.endog), dtype=np.int32)
        for g_ix, g_lb in enumerate(self.model.group_labels):
            ii = self.model.group_indices[g_lb]
            rx[ii] = np.arange(len(ii), dtype=np.int32)
        # Reindex
        for gp in self.model.group_labels:
            for lb in self.pairs[gp].keys():
                a, b = self.pairs[gp][lb]
                self.pairs[gp][lb] = (rx[a], rx[b])
    def update(self, params):
        endog = self.model.endog_li
        varfunc = self.model.family.variance
        cached_means = self.model.cached_means
        # Per-class accumulators: [cross product, sum sq 1, sum sq 2].
        dep_params = defaultdict(lambda: [0., 0., 0.])
        n_pairs = defaultdict(lambda: 0)
        dim = len(params)
        for k, gp in enumerate(self.model.group_labels):
            expval, _ = cached_means[k]
            stdev = np.sqrt(varfunc(expval))
            resid = (endog[k] - expval) / stdev
            for lb in self.pairs[gp].keys():
                if (not self.return_cov) and lb in self._var_classes:
                    continue
                jj = self.pairs[gp][lb]
                dep_params[lb][0] += np.sum(resid[jj[0]] * resid[jj[1]])
                if not self.return_cov:
                    dep_params[lb][1] += np.sum(resid[jj[0]] ** 2)
                    dep_params[lb][2] += np.sum(resid[jj[1]] ** 2)
                n_pairs[lb] += len(jj[0])
        if self.return_cov:
            # Covariance scale: normalize by (pairs - model dimension).
            for lb in dep_params.keys():
                dep_params[lb] = dep_params[lb][0] / (n_pairs[lb] - dim)
        else:
            # Correlation scale: normalize each class by the geometric
            # mean of its residual sums of squares.
            for lb in dep_params.keys():
                den = np.sqrt(dep_params[lb][1] * dep_params[lb][2])
                dep_params[lb] = dep_params[lb][0] / den
            for lb in self._var_classes:
                dep_params[lb] = 1.
        self.dep_params = dep_params
        self.n_pairs = n_pairs
    def covariance_matrix(self, expval, index):
        dim = len(expval)
        cmat = np.zeros((dim, dim))
        g_lb = self.model.group_labels[index]
        # Fill one triangle from the stored pairs ...
        for lb in self.pairs[g_lb].keys():
            j1, j2 = self.pairs[g_lb][lb]
            cmat[j1, j2] = self.dep_params[lb]
        # ... then symmetrize; halve the diagonal, which was added twice.
        cmat = cmat + cmat.T
        np.fill_diagonal(cmat, cmat.diagonal() / 2)
        return cmat, not self.return_cov
    update.__doc__ = CovStruct.update.__doc__
    covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
| {
"content_hash": "dfccb2da7f84ece85cf2b72774b99fc3",
"timestamp": "",
"source": "github",
"line_count": 1413,
"max_line_length": 79,
"avg_line_length": 33.552016985138,
"alnum_prop": 0.5624670421228037,
"repo_name": "bert9bert/statsmodels",
"id": "2416c38526e44049aee09e7decd3e67f7095242d",
"size": "47409",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "statsmodels/genmod/cov_struct.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10035"
},
{
"name": "Batchfile",
"bytes": "351"
},
{
"name": "C",
"bytes": "12088"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "Matlab",
"bytes": "2609"
},
{
"name": "Python",
"bytes": "9844784"
},
{
"name": "R",
"bytes": "55204"
},
{
"name": "Stata",
"bytes": "54989"
}
],
"symlink_target": ""
} |
import sys
import os
import signal
def write_stdout(s):
    """Write s to stdout and flush so the message is delivered immediately."""
    stream = sys.stdout
    stream.write(s)
    stream.flush()
def write_stderr(s):
    """Write s to stderr and flush so the message is delivered immediately."""
    stream = sys.stderr
    stream.write(s)
    stream.flush()
def main():
    """Event-listener loop: on every event received on stdin, kill supervisord.

    Announces READY, blocks on one line of input, then sends SIGQUIT to the
    pid recorded in /var/run/supervisord.pid and reports RESULT back.
    """
    while 1:
        write_stdout('READY\n')
        # Blocks until supervisor sends an event header line.
        line = sys.stdin.readline()
        write_stdout('This line kills supervisor: ' + line)
        try:
            # `with` guarantees the pid file is closed (the original
            # leaked the file handle).
            with open('/var/run/supervisord.pid', 'r') as pidfile:
                pid = int(pidfile.readline())
            os.kill(pid, signal.SIGQUIT)
        except Exception as e:
            # str(e) works for every exception type; e.strerror exists only
            # on OSError (and may be None), which crashed this handler for
            # e.g. a ValueError from int().
            write_stdout('Could not kill supervisor: ' + str(e) + '\n')
        write_stdout('RESULT 2\nOK')
if __name__ == '__main__':
    main()
import sys | {
"content_hash": "7a51b06c0b3edfdf13ee325002d6acb5",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 78,
"avg_line_length": 27.08,
"alnum_prop": 0.5731166912850812,
"repo_name": "kristophjunge/docker-mediawiki",
"id": "99f9a43ddf37e8f23966cc23f03b9282a5eab61e",
"size": "699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/supervisor/kill_supervisor.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "PHP",
"bytes": "7927"
},
{
"name": "Python",
"bytes": "699"
},
{
"name": "Shell",
"bytes": "2483"
}
],
"symlink_target": ""
} |
import sys
sys.path.insert(0, "/usr/local/opencv-2.4.11/lib/python2.7/site-packages/")
import argparse
import commands
import cv2
import fnmatch
import json
import math
import numpy as np
import os.path
from progress.bar import Bar
import scipy.spatial
sys.path.append('../lib')
import Matcher
import Pose
import ProjectMgr
import SRTM
# Reset all match point locations to their original direct
# georeferenced locations based on estimated camera pose and
# projection onto DEM earth surface
parser = argparse.ArgumentParser(description='Keypoint projection.')
parser.add_argument('--project', required=True, help='project directory')
args = parser.parse_args()
# Load the project and its per-image metadata.
proj = ProjectMgr.ProjectMgr(args.project)
proj.load_image_info()
# Load each image's match list and immediately re-save it so it is
# written back in the (pickle) format used by save_matches.
# NOTE: Python 2 print statement -- this script predates Python 3.
print "Converting match_list save format to 'pickle' ..."
for image in proj.image_list:
    image.load_matches()
    image.save_matches()
| {
"content_hash": "5402036e7c20df3df2d05cb23f295926",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 75,
"avg_line_length": 24.885714285714286,
"alnum_prop": 0.7761194029850746,
"repo_name": "UASLab/ImageAnalysis",
"id": "0e5732d905c5a9923aeca870992a36c957f42c1e",
"size": "890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/0-pickle-matches.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "2077"
},
{
"name": "Python",
"bytes": "1805747"
}
],
"symlink_target": ""
} |
"""Unit tests for the bytes and bytearray types.
XXX This is a mess. Common tests should be moved to buffer_tests.py,
which itself ought to be unified with string_tests.py (and the latter
should be modernized).
"""
import os
import re
import sys
import copy
import functools
import pickle
import tempfile
import unittest
import test.test_support
import test.string_tests
import test.buffer_tests
if not sys.flags.bytes_warning:
    # Without -b/-bb the interpreter emits no BytesWarning, so the
    # decorator is a pass-through.
    def check_bytes_warnings(func):
        return func
else:
    def check_bytes_warnings(func):
        """Run func while treating BytesWarning as an expected warning."""
        @functools.wraps(func)
        def wrapper(*args, **kw):
            with test.test_support.check_warnings(('', BytesWarning)):
                return func(*args, **kw)
        return wrapper
class Indexable:
    """Helper whose __index__ returns a preset integer.

    Used to exercise the operator.index() code paths of bytes/bytearray.
    """

    def __init__(self, value=0):
        self.value = value

    def __index__(self):
        return self.value
class BaseBytesTest(unittest.TestCase):
    # Shared test suite for the common bytes/bytearray API.  Subclasses set
    # `type2test` to the concrete type under test (bytes or bytearray).
    # Written for Python 2 / Jython: note uses of `unicode`, sys.maxint, etc.
    def test_basics(self):
        b = self.type2test()
        self.assertEqual(type(b), self.type2test)
        self.assertEqual(b.__class__, self.type2test)
    def test_empty_sequence(self):
        # Indexing an empty object must raise IndexError for any index,
        # including extreme positive/negative values.
        b = self.type2test()
        self.assertEqual(len(b), 0)
        self.assertRaises(IndexError, lambda: b[0])
        self.assertRaises(IndexError, lambda: b[1])
        self.assertRaises(IndexError, lambda: b[sys.maxint])
        self.assertRaises(IndexError, lambda: b[sys.maxint+1])
        self.assertRaises(IndexError, lambda: b[10**100])
        self.assertRaises(IndexError, lambda: b[-1])
        self.assertRaises(IndexError, lambda: b[-2])
        self.assertRaises(IndexError, lambda: b[-sys.maxint])
        self.assertRaises(IndexError, lambda: b[-sys.maxint-1])
        self.assertRaises(IndexError, lambda: b[-sys.maxint-2])
        self.assertRaises(IndexError, lambda: b[-10**100])
    def test_from_list(self):
        # Construction from a generator of ints 0..255.
        ints = list(range(256))
        b = self.type2test(i for i in ints)
        self.assertEqual(len(b), 256)
        self.assertEqual(list(b), ints)
    def test_from_index(self):
        # Items may be arbitrary objects implementing __index__.
        b = self.type2test([Indexable(), Indexable(1), Indexable(254),
                            Indexable(255)])
        self.assertEqual(list(b), [0, 1, 254, 255])
        self.assertRaises(ValueError, self.type2test, [Indexable(-1)])
        self.assertRaises(ValueError, self.type2test, [Indexable(256)])
    def test_from_ssize(self):
        # An integer argument produces that many zero bytes.
        self.assertEqual(self.type2test(0), b'')
        self.assertEqual(self.type2test(1), b'\x00')
        self.assertEqual(self.type2test(5), b'\x00\x00\x00\x00\x00')
        self.assertRaises(ValueError, self.type2test, -1)
        self.assertEqual(self.type2test('0', 'ascii'), b'0')
        self.assertEqual(self.type2test(b'0'), b'0')
        self.assertRaises(OverflowError, self.type2test, sys.maxsize + 1)
    def test_constructor_type_errors(self):
        self.assertRaises(TypeError, self.type2test, 0.0)
        class C:
            pass
        # allowed in 2.x
        #self.assertRaises(TypeError, self.type2test, ["0"])
        self.assertRaises(TypeError, self.type2test, [0.0])
        self.assertRaises(TypeError, self.type2test, [None])
        self.assertRaises(TypeError, self.type2test, [C()])
    def test_constructor_value_errors(self):
        # Item values outside 0..255 must raise ValueError.
        self.assertRaises(ValueError, self.type2test, [-1])
        self.assertRaises(ValueError, self.type2test, [-sys.maxint])
        self.assertRaises(ValueError, self.type2test, [-sys.maxint-1])
        self.assertRaises(ValueError, self.type2test, [-sys.maxint-2])
        self.assertRaises(ValueError, self.type2test, [-10**100])
        self.assertRaises(ValueError, self.type2test, [256])
        self.assertRaises(ValueError, self.type2test, [257])
        self.assertRaises(ValueError, self.type2test, [sys.maxint])
        self.assertRaises(ValueError, self.type2test, [sys.maxint+1])
        self.assertRaises(ValueError, self.type2test, [10**100])
    def test_compare(self):
        # Lexicographic ordering of byte sequences.
        b1 = self.type2test([1, 2, 3])
        b2 = self.type2test([1, 2, 3])
        b3 = self.type2test([1, 3])
        self.assertEqual(b1, b2)
        self.assertTrue(b2 != b3)
        self.assertTrue(b1 <= b2)
        self.assertTrue(b1 <= b3)
        self.assertTrue(b1 < b3)
        self.assertTrue(b1 >= b2)
        self.assertTrue(b3 >= b2)
        self.assertTrue(b3 > b2)
        self.assertFalse(b1 != b2)
        self.assertFalse(b2 == b3)
        self.assertFalse(b1 > b2)
        self.assertFalse(b1 > b3)
        self.assertFalse(b1 >= b3)
        self.assertFalse(b1 < b2)
        self.assertFalse(b3 < b2)
        self.assertFalse(b3 <= b2)
    @check_bytes_warnings
    def test_compare_to_str(self):
        # Byte comparisons with unicode should always fail!
        # Test this for all expected byte orders and Unicode character sizes
        self.assertEqual(self.type2test(b"\0a\0b\0c") == u"abc", False)
        self.assertEqual(self.type2test(b"\0\0\0a\0\0\0b\0\0\0c") == u"abc", False)
        self.assertEqual(self.type2test(b"a\0b\0c\0") == u"abc", False)
        self.assertEqual(self.type2test(b"a\0\0\0b\0\0\0c\0\0\0") == u"abc", False)
        self.assertEqual(self.type2test() == unicode(), False)
        self.assertEqual(self.type2test() != unicode(), True)
    def test_reversed(self):
        # reversed() yields the item ints in reverse order.
        input = list(map(ord, "Hello"))
        b = self.type2test(input)
        output = list(reversed(b))
        input.reverse()
        self.assertEqual(output, input)
    def test_getslice(self):
        def by(s):
            return self.type2test(map(ord, s))
        b = by("Hello, world")
        self.assertEqual(b[:5], by("Hello"))
        self.assertEqual(b[1:5], by("ello"))
        self.assertEqual(b[5:7], by(", "))
        self.assertEqual(b[7:], by("world"))
        self.assertEqual(b[7:12], by("world"))
        self.assertEqual(b[7:100], by("world"))
        self.assertEqual(b[:-7], by("Hello"))
        self.assertEqual(b[-11:-7], by("ello"))
        self.assertEqual(b[-7:-5], by(", "))
        self.assertEqual(b[-5:], by("world"))
        self.assertEqual(b[-5:12], by("world"))
        self.assertEqual(b[-5:100], by("world"))
        self.assertEqual(b[-100:5], by("Hello"))
    def test_extended_getslice(self):
        # Test extended slicing by comparing with list slicing.
        L = list(range(255))
        b = self.type2test(L)
        indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100)
        for start in indices:
            for stop in indices:
                # Skip step 0 (invalid)
                for step in indices[1:]:
                    self.assertEqual(b[start:stop:step], self.type2test(L[start:stop:step]))
    #XXX: Jython doesn't support codepoints outside of the UTF-16 range even at
    # parse time. Maybe someday we might push the error off to later, but for
    # now I'm just commenting this whole test out.
    # See http://bugs.jython.org/issue1836 for more.
    # def test_encoding(self):
    #     sample = u"Hello world\n\u1234\u5678\u9abc\udef0"
    #     for enc in ("utf8", "utf16"):
    #         b = self.type2test(sample, enc)
    #         self.assertEqual(b, self.type2test(sample.encode(enc)))
    #     self.assertRaises(UnicodeEncodeError, self.type2test, sample, "latin1")
    #     b = self.type2test(sample, "latin1", "ignore")
    #     self.assertEqual(b, self.type2test(sample[:-4], "utf-8"))
    def test_decode(self):
        # NOTE(review): "\def0" is not an escape sequence, so the literal
        # contains a backslash, 'd', 'e', 'f', '0'.  The commented-out
        # test above uses "\udef0"; this looks like a typo -- confirm
        # against the upstream CPython test before changing.
        sample = u"Hello world\n\u1234\u5678\u9abc\def0\def0"
        for enc in ("utf8", "utf16"):
            b = self.type2test(sample, enc)
            self.assertEqual(b.decode(enc), sample)
        sample = u"Hello world\n\x80\x81\xfe\xff"
        b = self.type2test(sample, "latin1")
        self.assertRaises(UnicodeDecodeError, b.decode, "utf8")
        self.assertEqual(b.decode("utf8", "ignore"), "Hello world\n")
        self.assertEqual(b.decode(errors="ignore", encoding="utf8"),
                         "Hello world\n")
    def test_from_int(self):
        b = self.type2test(0)
        self.assertEqual(b, self.type2test())
        b = self.type2test(10)
        self.assertEqual(b, self.type2test([0]*10))
        b = self.type2test(10000)
        self.assertEqual(b, self.type2test([0]*10000))
    def test_concat(self):
        # + concatenates bytes-likes; mixing with unicode raises TypeError.
        b1 = self.type2test(b"abc")
        b2 = self.type2test(b"def")
        self.assertEqual(b1 + b2, b"abcdef")
        self.assertEqual(b1 + bytes(b"def"), b"abcdef")
        self.assertEqual(bytes(b"def") + b1, b"defabc")
        self.assertRaises(TypeError, lambda: b1 + u"def")
        self.assertRaises(TypeError, lambda: u"abc" + b2)
    def test_repeat(self):
        for b in b"abc", self.type2test(b"abc"):
            self.assertEqual(b * 3, b"abcabcabc")
            self.assertEqual(b * 0, b"")
            self.assertEqual(b * -1, b"")
            self.assertRaises(TypeError, lambda: b * 3.14)
            self.assertRaises(TypeError, lambda: 3.14 * b)
            # XXX Shouldn't bytes and bytearray agree on what to raise?
            self.assertRaises((OverflowError, MemoryError),
                              lambda: b * sys.maxsize)
    def test_repeat_1char(self):
        self.assertEqual(self.type2test(b'x')*100, self.type2test([ord('x')]*100))
    def test_contains(self):
        # Both single int values and bytes-like subsequences are supported.
        b = self.type2test(b"abc")
        self.assertIn(ord('a'), b)
        self.assertIn(int(ord('a')), b)
        self.assertNotIn(200, b)
        self.assertRaises(ValueError, lambda: 300 in b)
        self.assertRaises(ValueError, lambda: -1 in b)
        self.assertRaises(TypeError, lambda: None in b)
        self.assertRaises(TypeError, lambda: float(ord('a')) in b)
        self.assertRaises(TypeError, lambda: u"a" in b)
        for f in bytes, bytearray:
            self.assertIn(f(b""), b)
            self.assertIn(f(b"a"), b)
            self.assertIn(f(b"b"), b)
            self.assertIn(f(b"c"), b)
            self.assertIn(f(b"ab"), b)
            self.assertIn(f(b"bc"), b)
            self.assertIn(f(b"abc"), b)
            self.assertNotIn(f(b"ac"), b)
            self.assertNotIn(f(b"d"), b)
            self.assertNotIn(f(b"dab"), b)
            self.assertNotIn(f(b"abd"), b)
    def test_fromhex(self):
        self.assertRaises(TypeError, self.type2test.fromhex)
        self.assertRaises(TypeError, self.type2test.fromhex, 1)
        self.assertEqual(self.type2test.fromhex(u''), self.type2test())
        b = bytearray([0x1a, 0x2b, 0x30, 0xca, 0xfe, 0xba, 0xbe]) # challenging signs
        self.assertEqual(self.type2test.fromhex(u'1a2B30CafEBabe'), b)
        self.assertEqual(self.type2test.fromhex(u' 1A 2B 30 CafeBabe '), b)
        self.assertEqual(self.type2test.fromhex(u'0000'), b'\0\0')
        self.assertRaises(ValueError, self.type2test.fromhex, u'a')
        self.assertRaises(ValueError, self.type2test.fromhex, u'rt')
        self.assertRaises(ValueError, self.type2test.fromhex, u'1a b cd')
        self.assertRaises(ValueError, self.type2test.fromhex, u'\x00')
        self.assertRaises(ValueError, self.type2test.fromhex, u'12 \x00 34')
    def test_join(self):
        self.assertEqual(self.type2test(b"").join([]), b"")
        self.assertEqual(self.type2test(b"").join([b""]), b"")
        for lst in [[b"abc"], [b"a", b"bc"], [b"ab", b"c"], [b"a", b"b", b"c"]]:
            lst = list(map(self.type2test, lst))
            self.assertEqual(self.type2test(b"").join(lst), b"abc")
            self.assertEqual(self.type2test(b"").join(tuple(lst)), b"abc")
            self.assertEqual(self.type2test(b"").join(iter(lst)), b"abc")
        self.assertEqual(self.type2test(b".").join([b"ab", b"cd"]), b"ab.cd")
        # XXX more...
    def test_count(self):
        b = self.type2test(b'mississippi')
        self.assertEqual(b.count(b'i'), 4)
        self.assertEqual(b.count(b'ss'), 2)
        self.assertEqual(b.count(b'w'), 0)
    def test_startswith(self):
        b = self.type2test(b'hello')
        self.assertFalse(self.type2test().startswith(b"anything"))
        self.assertTrue(b.startswith(b"hello"))
        self.assertTrue(b.startswith(b"hel"))
        self.assertTrue(b.startswith(b"h"))
        self.assertFalse(b.startswith(b"hellow"))
        self.assertFalse(b.startswith(b"ha"))
    def test_endswith(self):
        b = self.type2test(b'hello')
        self.assertFalse(bytearray().endswith(b"anything"))
        self.assertTrue(b.endswith(b"hello"))
        self.assertTrue(b.endswith(b"llo"))
        self.assertTrue(b.endswith(b"o"))
        self.assertFalse(b.endswith(b"whello"))
        self.assertFalse(b.endswith(b"no"))
    def test_find(self):
        b = self.type2test(b'mississippi')
        self.assertEqual(b.find(b'ss'), 2)
        self.assertEqual(b.find(b'ss', 3), 5)
        self.assertEqual(b.find(b'ss', 1, 7), 2)
        self.assertEqual(b.find(b'ss', 1, 3), -1)
        self.assertEqual(b.find(b'w'), -1)
        self.assertEqual(b.find(b'mississippian'), -1)
    def test_rfind(self):
        b = self.type2test(b'mississippi')
        self.assertEqual(b.rfind(b'ss'), 5)
        self.assertEqual(b.rfind(b'ss', 3), 5)
        self.assertEqual(b.rfind(b'ss', 0, 6), 2)
        self.assertEqual(b.rfind(b'w'), -1)
        self.assertEqual(b.rfind(b'mississippian'), -1)
    def test_index(self):
        b = self.type2test(b'world')
        self.assertEqual(b.index(b'w'), 0)
        self.assertEqual(b.index(b'orl'), 1)
        self.assertRaises(ValueError, b.index, b'worm')
        self.assertRaises(ValueError, b.index, b'ldo')
    def test_rindex(self):
        # XXX could be more rigorous
        b = self.type2test(b'world')
        self.assertEqual(b.rindex(b'w'), 0)
        self.assertEqual(b.rindex(b'orl'), 1)
        self.assertRaises(ValueError, b.rindex, b'worm')
        self.assertRaises(ValueError, b.rindex, b'ldo')
    def test_replace(self):
        b = self.type2test(b'mississippi')
        self.assertEqual(b.replace(b'i', b'a'), b'massassappa')
        self.assertEqual(b.replace(b'ss', b'x'), b'mixixippi')
    def test_split(self):
        b = self.type2test(b'mississippi')
        self.assertEqual(b.split(b'i'), [b'm', b'ss', b'ss', b'pp', b''])
        self.assertEqual(b.split(b'ss'), [b'mi', b'i', b'ippi'])
        self.assertEqual(b.split(b'w'), [b])
    def test_split_whitespace(self):
        # split() with no argument splits on ASCII whitespace only.
        for b in (b'  arf  barf  ', b'arf\tbarf', b'arf\nbarf', b'arf\rbarf',
                  b'arf\fbarf', b'arf\vbarf'):
            b = self.type2test(b)
            self.assertEqual(b.split(), [b'arf', b'barf'])
            self.assertEqual(b.split(None), [b'arf', b'barf'])
            self.assertEqual(b.split(None, 2), [b'arf', b'barf'])
        for b in (b'a\x1Cb', b'a\x1Db', b'a\x1Eb', b'a\x1Fb'):
            b = self.type2test(b)
            self.assertEqual(b.split(), [b])
        self.assertEqual(self.type2test(b'  a  bb  c  ').split(None, 0), [b'a  bb  c  '])
        self.assertEqual(self.type2test(b'  a  bb  c  ').split(None, 1), [b'a', b'bb  c  '])
        self.assertEqual(self.type2test(b'  a  bb  c  ').split(None, 2), [b'a', b'bb', b'c  '])
        self.assertEqual(self.type2test(b'  a  bb  c  ').split(None, 3), [b'a', b'bb', b'c'])
    def test_split_string_error(self):
        self.assertRaises(TypeError, self.type2test(b'a b').split, u' ')
    def test_split_unicodewhitespace(self):
        b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F")
        self.assertEqual(b.split(), [b'\x1c\x1d\x1e\x1f'])
    def test_rsplit(self):
        b = self.type2test(b'mississippi')
        self.assertEqual(b.rsplit(b'i'), [b'm', b'ss', b'ss', b'pp', b''])
        self.assertEqual(b.rsplit(b'ss'), [b'mi', b'i', b'ippi'])
        self.assertEqual(b.rsplit(b'w'), [b])
    def test_rsplit_whitespace(self):
        for b in (b'  arf  barf  ', b'arf\tbarf', b'arf\nbarf', b'arf\rbarf',
                  b'arf\fbarf', b'arf\vbarf'):
            b = self.type2test(b)
            self.assertEqual(b.rsplit(), [b'arf', b'barf'])
            self.assertEqual(b.rsplit(None), [b'arf', b'barf'])
            self.assertEqual(b.rsplit(None, 2), [b'arf', b'barf'])
        self.assertEqual(self.type2test(b'  a  bb  c  ').rsplit(None, 0), [b'  a  bb  c'])
        self.assertEqual(self.type2test(b'  a  bb  c  ').rsplit(None, 1), [b'  a  bb', b'c'])
        self.assertEqual(self.type2test(b'  a  bb  c  ').rsplit(None, 2), [b'  a', b'bb', b'c'])
        self.assertEqual(self.type2test(b'  a  bb  c  ').rsplit(None, 3), [b'a', b'bb', b'c'])
    def test_rsplit_string_error(self):
        self.assertRaises(TypeError, self.type2test(b'a b').rsplit, u' ')
    def test_rsplit_unicodewhitespace(self):
        b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F")
        self.assertEqual(b.rsplit(), [b'\x1c\x1d\x1e\x1f'])
    def test_partition(self):
        b = self.type2test(b'mississippi')
        self.assertEqual(b.partition(b'ss'), (b'mi', b'ss', b'issippi'))
        self.assertEqual(b.partition(b'w'), (b'mississippi', b'', b''))
    def test_rpartition(self):
        b = self.type2test(b'mississippi')
        self.assertEqual(b.rpartition(b'ss'), (b'missi', b'ss', b'ippi'))
        self.assertEqual(b.rpartition(b'i'), (b'mississipp', b'i', b''))
        self.assertEqual(b.rpartition(b'w'), (b'', b'', b'mississippi'))
    def test_pickling(self):
        # Round-trip through every available pickle protocol.
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            for b in b"", b"a", b"abc", b"\xffab\x80", b"\0\0\377\0\0":
                b = self.type2test(b)
                ps = pickle.dumps(b, proto)
                q = pickle.loads(ps)
                self.assertEqual(b, q)
    def test_strip(self):
        b = self.type2test(b'mississippi')
        self.assertEqual(b.strip(b'i'), b'mississipp')
        self.assertEqual(b.strip(b'm'), b'ississippi')
        self.assertEqual(b.strip(b'pi'), b'mississ')
        self.assertEqual(b.strip(b'im'), b'ssissipp')
        self.assertEqual(b.strip(b'pim'), b'ssiss')
        self.assertEqual(b.strip(b), b'')
    def test_lstrip(self):
        b = self.type2test(b'mississippi')
        self.assertEqual(b.lstrip(b'i'), b'mississippi')
        self.assertEqual(b.lstrip(b'm'), b'ississippi')
        self.assertEqual(b.lstrip(b'pi'), b'mississippi')
        self.assertEqual(b.lstrip(b'im'), b'ssissippi')
        self.assertEqual(b.lstrip(b'pim'), b'ssissippi')
    def test_rstrip(self):
        b = self.type2test(b'mississippi')
        self.assertEqual(b.rstrip(b'i'), b'mississipp')
        self.assertEqual(b.rstrip(b'm'), b'mississippi')
        self.assertEqual(b.rstrip(b'pi'), b'mississ')
        self.assertEqual(b.rstrip(b'im'), b'mississipp')
        self.assertEqual(b.rstrip(b'pim'), b'mississ')
    def test_strip_whitespace(self):
        b = self.type2test(b' \t\n\r\f\vabc \t\n\r\f\v')
        self.assertEqual(b.strip(), b'abc')
        self.assertEqual(b.lstrip(), b'abc \t\n\r\f\v')
        self.assertEqual(b.rstrip(), b' \t\n\r\f\vabc')
    def test_strip_bytearray(self):
        # strip/lstrip/rstrip accept any bytes-like (here a memoryview).
        self.assertEqual(self.type2test(b'abc').strip(memoryview(b'ac')), b'b')
        self.assertEqual(self.type2test(b'abc').lstrip(memoryview(b'ac')), b'bc')
        self.assertEqual(self.type2test(b'abc').rstrip(memoryview(b'ac')), b'ab')
    def test_strip_string_error(self):
        self.assertRaises(TypeError, self.type2test(b'abc').strip, u'b')
        self.assertRaises(TypeError, self.type2test(b'abc').lstrip, u'b')
        self.assertRaises(TypeError, self.type2test(b'abc').rstrip, u'b')
    def test_ord(self):
        b = self.type2test(b'\0A\x7f\x80\xff')
        self.assertEqual([ord(b[i:i+1]) for i in range(len(b))],
                         [0, 65, 127, 128, 255])
    def test_none_arguments(self):
        # issue 11828
        # None as a start/stop argument behaves like an omitted argument.
        b = self.type2test(b'hello')
        l = self.type2test(b'l')
        h = self.type2test(b'h')
        x = self.type2test(b'x')
        o = self.type2test(b'o')
        self.assertEqual(2, b.find(l, None))
        self.assertEqual(3, b.find(l, -2, None))
        self.assertEqual(2, b.find(l, None, -2))
        self.assertEqual(0, b.find(h, None, None))
        self.assertEqual(3, b.rfind(l, None))
        self.assertEqual(3, b.rfind(l, -2, None))
        self.assertEqual(2, b.rfind(l, None, -2))
        self.assertEqual(0, b.rfind(h, None, None))
        self.assertEqual(2, b.index(l, None))
        self.assertEqual(3, b.index(l, -2, None))
        self.assertEqual(2, b.index(l, None, -2))
        self.assertEqual(0, b.index(h, None, None))
        self.assertEqual(3, b.rindex(l, None))
        self.assertEqual(3, b.rindex(l, -2, None))
        self.assertEqual(2, b.rindex(l, None, -2))
        self.assertEqual(0, b.rindex(h, None, None))
        self.assertEqual(2, b.count(l, None))
        self.assertEqual(1, b.count(l, -2, None))
        self.assertEqual(1, b.count(l, None, -2))
        self.assertEqual(0, b.count(x, None, None))
        self.assertEqual(True, b.endswith(o, None))
        self.assertEqual(True, b.endswith(o, -2, None))
        self.assertEqual(True, b.endswith(l, None, -2))
        self.assertEqual(False, b.endswith(x, None, None))
        self.assertEqual(True, b.startswith(h, None))
        self.assertEqual(True, b.startswith(l, -2, None))
        self.assertEqual(True, b.startswith(h, None, -2))
        self.assertEqual(False, b.startswith(x, None, None))
    def test_find_etc_raise_correct_error_messages(self):
        # issue 11828
        # The raised TypeError must name the offending method.
        b = self.type2test(b'hello')
        x = self.type2test(b'x')
        self.assertRaisesRegexp(TypeError, r'\bfind\b', b.find,
                                x, None, None, None)
        self.assertRaisesRegexp(TypeError, r'\brfind\b', b.rfind,
                                x, None, None, None)
        self.assertRaisesRegexp(TypeError, r'\bindex\b', b.index,
                                x, None, None, None)
        self.assertRaisesRegexp(TypeError, r'\brindex\b', b.rindex,
                                x, None, None, None)
        self.assertRaisesRegexp(TypeError, r'\bcount\b', b.count,
                                x, None, None, None)
        self.assertRaisesRegexp(TypeError, r'\bstartswith\b', b.startswith,
                                x, None, None, None)
        self.assertRaisesRegexp(TypeError, r'\bendswith\b', b.endswith,
                                x, None, None, None)
    def test_translate(self):
        # adapted from AssortedBytesTest.test_translate
        # NOTE(review): the translation table is built by joining the result
        # of map(chr, ...) and then mutated by item assignment -- this relies
        # on Python 2 str/bytearray semantics; confirm before porting.
        b = self.type2test(b'hello')
        rosetta = self.type2test().join(map(chr,range(256)))
        rosetta[ord('o')] = ord('e')
        c = b.translate(rosetta, b'l')
        self.assertEqual(b, b'hello')
        self.assertEqual(c, b'hee')
        c = b.translate(None, b'e')
        self.assertEqual(c, b'hllo')
        c = b.translate(None, b'the larch')
        self.assertEqual(c, b'o')
        stone = self.type2test(''.join(map(chr,range(1,256))))
        self.assertRaises(ValueError, b.translate, stone, b'short')
        self.assertRaises(TypeError, b.translate, rosetta, None)
        self.assertRaises(TypeError, b.translate, None, None)
class ByteArrayTest(BaseBytesTest):
type2test = bytearray
def test_nohash(self):
self.assertRaises(TypeError, hash, bytearray())
    def test_bytearray_api(self):
        # Exercise file I/O with bytearray: readinto, and writing back out.
        short_sample = b"Hello world\n"
        # Pad the expected buffer contents with NULs to the readinto size.
        sample = short_sample + b"\0"*(20 - len(short_sample))
        tfn = tempfile.mktemp()
        try:
            # Prepare
            with open(tfn, "wb") as f:
                f.write(short_sample)
            # Test readinto
            with open(tfn, "rb") as f:
                b = bytearray(20)
                n = f.readinto(b)
                self.assertEqual(n, len(short_sample))
                # Python 2.x
                b_sample = (ord(s) for s in sample)
                self.assertEqual(list(b), list(b_sample))
            # Test writing in binary mode
            with open(tfn, "wb") as f:
                f.write(b)
            with open(tfn, "rb") as f:
                self.assertEqual(f.read(), sample)
            # Text mode is ambiguous; don't test
        finally:
            # Best-effort cleanup of the temp file.
            try:
                os.remove(tfn)
            except os.error:
                pass
def test_reverse(self):
b = bytearray(b'hello')
self.assertEqual(b.reverse(), None)
self.assertEqual(b, b'olleh')
b = bytearray(b'hello1') # test even number of items
b.reverse()
self.assertEqual(b, b'1olleh')
b = bytearray()
b.reverse()
self.assertFalse(b)
def test_regexps(self):
def by(s):
return bytearray(map(ord, s))
b = by("Hello, world")
self.assertEqual(re.findall(r"\w+", b), [by("Hello"), by("world")])
def test_setitem(self):
b = bytearray([1, 2, 3])
b[1] = 100
self.assertEqual(b, bytearray([1, 100, 3]))
b[-1] = 200
self.assertEqual(b, bytearray([1, 100, 200]))
b[0] = Indexable(10)
self.assertEqual(b, bytearray([10, 100, 200]))
try:
b[3] = 0
self.fail("Didn't raise IndexError")
except IndexError:
pass
try:
b[-10] = 0
self.fail("Didn't raise IndexError")
except IndexError:
pass
try:
b[0] = 256
self.fail("Didn't raise ValueError")
except ValueError:
pass
try:
b[0] = Indexable(-1)
self.fail("Didn't raise ValueError")
except ValueError:
pass
try:
b[0] = None
self.fail("Didn't raise TypeError")
except TypeError:
pass
def test_delitem(self):
b = bytearray(range(10))
del b[0]
self.assertEqual(b, bytearray(range(1, 10)))
del b[-1]
self.assertEqual(b, bytearray(range(1, 9)))
del b[4]
self.assertEqual(b, bytearray([1, 2, 3, 4, 6, 7, 8]))
    def test_setslice(self):
        # Simple (non-extended) slice assignment, including length-changing
        # assignments and insertion via empty/inverted slices.
        b = bytearray(range(10))
        self.assertEqual(list(b), list(range(10)))
        # Same-length replacement.
        b[0:5] = bytearray([1, 1, 1, 1, 1])
        self.assertEqual(b, bytearray([1, 1, 1, 1, 1, 5, 6, 7, 8, 9]))
        del b[0:-5]
        self.assertEqual(b, bytearray([5, 6, 7, 8, 9]))
        # Insertion at the front via an empty slice.
        b[0:0] = bytearray([0, 1, 2, 3, 4])
        self.assertEqual(b, bytearray(range(10)))
        # Shrinking replacement.
        b[-7:-3] = bytearray([100, 101])
        self.assertEqual(b, bytearray([0, 1, 2, 100, 101, 7, 8, 9]))
        # Growing replacement from a plain list of ints.
        b[3:5] = [3, 4, 5, 6]
        self.assertEqual(b, bytearray(range(10)))
        # An inverted slice (start > stop) behaves as insertion at start.
        b[3:0] = [42, 42, 42]
        self.assertEqual(b, bytearray([0, 1, 2, 42, 42, 42, 3, 4, 5, 6, 7, 8, 9]))
    def test_extended_set_del_slice(self):
        """Extended (stepped) slice assignment and deletion must mirror
        list semantics for a wide mix of start/stop/step values."""
        indices = (0, None, 1, 3, 19, 300, 1<<333, -1, -2, -31, -300)
        for start in indices:
            for stop in indices:
                # Skip invalid step 0
                for step in indices[1:]:
                    L = list(range(255))
                    b = bytearray(L)
                    # Make sure we have a slice of exactly the right length,
                    # but with different data.
                    data = L[start:stop:step]
                    data.reverse()
                    L[start:stop:step] = data
                    b[start:stop:step] = data
                    self.assertEqual(b, bytearray(L))
                    del L[start:stop:step]
                    del b[start:stop:step]
                    self.assertEqual(b, bytearray(L))
    def test_setslice_trap(self):
        """Assigning a bytearray into a slice of itself must read the
        source before overwriting it (the Lambert Meertens trap)."""
        # This test verifies that we correctly handle assigning self
        # to a slice of self (the old Lambert Meertens trap).
        b = bytearray(range(256))
        b[8:] = b
        self.assertEqual(b, bytearray(list(range(8)) + list(range(256))))
    def test_iconcat(self):
        """+= concatenates in place (identity preserved) and rejects unicode."""
        b = bytearray(b"abc")
        b1 = b
        b += b"def"
        self.assertEqual(b, b"abcdef")
        self.assertEqual(b, b1)
        self.assertTrue(b is b1)
        b += b"xyz"
        self.assertEqual(b, b"abcdefxyz")
        try:
            b += u""
        except TypeError:
            pass
        else:
            self.fail("bytes += unicode didn't raise TypeError")
    def test_irepeat(self):
        """*= repeats in place; the object identity is preserved."""
        b = bytearray(b"abc")
        b1 = b
        b *= 3
        self.assertEqual(b, b"abcabcabc")
        self.assertEqual(b, b1)
        self.assertTrue(b is b1)
    def test_irepeat_1char(self):
        """*= on a single-byte bytearray (exercises the memset fast path)."""
        b = bytearray(b"x")
        b1 = b
        b *= 100
        self.assertEqual(b, b"x"*100)
        self.assertEqual(b, b1)
        self.assertTrue(b is b1)
    def test_alloc(self):
        """__alloc__() always reports a capacity >= the current length;
        seq just collects the distinct capacities seen (not asserted on)."""
        b = bytearray()
        alloc = b.__alloc__()
        self.assertTrue(alloc >= 0)
        seq = [alloc]
        for i in range(100):
            b += b"x"
            alloc = b.__alloc__()
            self.assertTrue(alloc >= len(b))
            if alloc not in seq:
                seq.append(alloc)
    def test_extend(self):
        """extend() accepts self, generators/maps without __length_hint__,
        iterators, lists, and __index__ items; out-of-range values raise
        ValueError and leave the array empty."""
        orig = b'hello'
        a = bytearray(orig)
        a.extend(a)
        self.assertEqual(a, orig + orig)
        self.assertEqual(a[5:], orig)
        a = bytearray(b'')
        # Test iterators that don't have a __length_hint__
        a.extend(map(ord, orig * 25))
        a.extend(ord(x) for x in orig * 25)
        self.assertEqual(a, orig * 50)
        self.assertEqual(a[-5:], orig)
        a = bytearray(b'')
        a.extend(iter(map(ord, orig * 50)))
        self.assertEqual(a, orig * 50)
        self.assertEqual(a[-5:], orig)
        a = bytearray(b'')
        a.extend(list(map(ord, orig * 50)))
        self.assertEqual(a, orig * 50)
        self.assertEqual(a[-5:], orig)
        a = bytearray(b'')
        self.assertRaises(ValueError, a.extend, [0, 1, 2, 256])
        self.assertRaises(ValueError, a.extend, [0, 1, 2, -1])
        self.assertEqual(len(a), 0)
        a = bytearray(b'')
        a.extend([Indexable(ord('a'))])
        self.assertEqual(a, b'a')
    def test_remove(self):
        """remove() deletes the first occurrence; missing/out-of-range values
        raise ValueError, non-integers TypeError, __index__ objects accepted."""
        b = bytearray(b'hello')
        b.remove(ord('l'))
        self.assertEqual(b, b'helo')
        b.remove(ord('l'))
        self.assertEqual(b, b'heo')
        self.assertRaises(ValueError, lambda: b.remove(ord('l')))
        self.assertRaises(ValueError, lambda: b.remove(400))
        self.assertRaises(TypeError, lambda: b.remove(u'e'))
        # remove first and last
        b.remove(ord('o'))
        b.remove(ord('h'))
        self.assertEqual(b, b'e')
        self.assertRaises(TypeError, lambda: b.remove(u'e'))
        b.remove(Indexable(ord('e')))
        self.assertEqual(b, b'')
    def test_pop(self):
        """pop() removes from the end by default, honors indexes (incl.
        negative), and raises IndexError when out of range or empty."""
        b = bytearray(b'world')
        self.assertEqual(b.pop(), ord('d'))
        self.assertEqual(b.pop(0), ord('w'))
        self.assertEqual(b.pop(-2), ord('r'))
        self.assertRaises(IndexError, lambda: b.pop(10))
        self.assertRaises(IndexError, lambda: bytearray().pop())
        # test for issue #6846
        self.assertEqual(bytearray(b'\xff').pop(), 0xff)
    def test_nosort(self):
        """bytearray deliberately has no sort() method."""
        self.assertRaises(AttributeError, lambda: bytearray().sort())
    def test_append(self):
        """append() takes one int (or __index__ object), returns None, and
        rejects unicode."""
        b = bytearray(b'hell')
        b.append(ord('o'))
        self.assertEqual(b, b'hello')
        self.assertEqual(b.append(100), None)
        b = bytearray()
        b.append(ord('A'))
        self.assertEqual(len(b), 1)
        self.assertRaises(TypeError, lambda: b.append(u'o'))
        b = bytearray()
        b.append(Indexable(ord('A')))
        self.assertEqual(b, b'A')
    def test_insert(self):
        """insert() clamps out-of-range indexes (incl. negative and huge)
        like list.insert does."""
        b = bytearray(b'msssspp')
        b.insert(1, ord('i'))
        b.insert(4, ord('i'))
        b.insert(-2, ord('i'))
        b.insert(1000, ord('i'))
        self.assertEqual(b, b'mississippi')
        # allowed in 2.x
        #self.assertRaises(TypeError, lambda: b.insert(0, b'1'))
        b = bytearray()
        b.insert(0, Indexable(ord('A')))
        self.assertEqual(b, b'A')
    def test_copied(self):
        """Non-mutating operations must return a copy, never self."""
        # Issue 4348. Make sure that operations that don't mutate the array
        # copy the bytes.
        b = bytearray(b'abc')
        self.assertFalse(b is b.replace(b'abc', b'cde', 0))
        t = bytearray([i for i in range(256)])
        x = bytearray(b'')
        self.assertFalse(x is x.translate(t))
    def test_partition_bytearray_doesnt_share_nullstring(self):
        """partition()/rpartition() must return distinct empty bytearrays,
        not a shared singleton, since bytearrays are mutable."""
        a, b, c = bytearray(b"x").partition(b"y")
        self.assertEqual(b, b"")
        self.assertEqual(c, b"")
        self.assertTrue(b is not c)
        b += b"!"
        self.assertEqual(c, b"")
        a, b, c = bytearray(b"x").partition(b"y")
        self.assertEqual(b, b"")
        self.assertEqual(c, b"")
        # Same for rpartition
        b, c, a = bytearray(b"x").rpartition(b"y")
        self.assertEqual(b, b"")
        self.assertEqual(c, b"")
        self.assertTrue(b is not c)
        b += b"!"
        self.assertEqual(c, b"")
        c, b, a = bytearray(b"x").rpartition(b"y")
        self.assertEqual(b, b"")
        self.assertEqual(c, b"")
    def test_resize_forbidden(self):
        """Any size-changing operation must raise BufferError while a
        memoryview exports the buffer, without mutating the contents."""
        # #4509: can't resize a bytearray when there are buffer exports, even
        # if it wouldn't reallocate the underlying buffer.
        # Furthermore, no destructive changes to the buffer may be applied
        # before raising the error.
        b = bytearray(range(10))
        v = memoryview(b)
        def resize(n):
            # Same-length replacement when n == 10; shrink/grow otherwise.
            b[1:-1] = range(n + 1, 2*n - 1)
        resize(10)
        orig = b[:]
        self.assertRaises(BufferError, resize, 11)
        self.assertEqual(b, orig)
        self.assertRaises(BufferError, resize, 9)
        self.assertEqual(b, orig)
        self.assertRaises(BufferError, resize, 0)
        self.assertEqual(b, orig)
        # Other operations implying resize
        self.assertRaises(BufferError, b.pop, 0)
        self.assertEqual(b, orig)
        self.assertRaises(BufferError, b.remove, b[1])
        self.assertEqual(b, orig)
        def delitem():
            del b[1]
        self.assertRaises(BufferError, delitem)
        self.assertEqual(b, orig)
        # deleting a non-contiguous slice
        def delslice():
            b[1:-1:2] = b""
        self.assertRaises(BufferError, delslice)
        self.assertEqual(b, orig)
        if test.test_support.is_jython:
            # Show that releasing v releases the bytearray for size change
            v.release()
            b.pop()
    def test_empty_bytearray(self):
        """Operations on empty bytearrays must not crash (issue #7561)."""
        # Issue #7561: operations on empty bytearrays could crash in many
        # situations, due to a fragile implementation of the
        # PyByteArray_AS_STRING() C macro.
        self.assertRaises(ValueError, int, bytearray(b''))
class AssortedBytesTest(unittest.TestCase):
    #
    # Test various combinations of bytes and bytearray
    #
    @check_bytes_warnings
    def test_repr_str(self):
        """str() and repr() of bytes/bytearray produce the literal forms."""
        for f in str, repr:
            self.assertEqual(f(bytearray()), "bytearray(b'')")
            self.assertEqual(f(bytearray([0])), "bytearray(b'\\x00')")
            self.assertEqual(f(bytearray([0, 1, 254, 255])),
                             "bytearray(b'\\x00\\x01\\xfe\\xff')")
            self.assertEqual(f(b"abc"), "b'abc'")
            self.assertEqual(f(b"'"), '''b"'"''') # '''
            self.assertEqual(f(b"'\""), r"""b'\'"'""") # '
    def test_compare_bytes_to_bytearray(self):
        """All six comparison operators agree in both directions across the
        bytes/bytearray type boundary."""
        self.assertEqual(b"abc" == bytes(b"abc"), True)
        self.assertEqual(b"ab" != bytes(b"abc"), True)
        self.assertEqual(b"ab" <= bytes(b"abc"), True)
        self.assertEqual(b"ab" < bytes(b"abc"), True)
        self.assertEqual(b"abc" >= bytes(b"ab"), True)
        self.assertEqual(b"abc" > bytes(b"ab"), True)
        self.assertEqual(b"abc" != bytes(b"abc"), False)
        self.assertEqual(b"ab" == bytes(b"abc"), False)
        self.assertEqual(b"ab" > bytes(b"abc"), False)
        self.assertEqual(b"ab" >= bytes(b"abc"), False)
        self.assertEqual(b"abc" < bytes(b"ab"), False)
        self.assertEqual(b"abc" <= bytes(b"ab"), False)
        self.assertEqual(bytes(b"abc") == b"abc", True)
        self.assertEqual(bytes(b"ab") != b"abc", True)
        self.assertEqual(bytes(b"ab") <= b"abc", True)
        self.assertEqual(bytes(b"ab") < b"abc", True)
        self.assertEqual(bytes(b"abc") >= b"ab", True)
        self.assertEqual(bytes(b"abc") > b"ab", True)
        self.assertEqual(bytes(b"abc") != b"abc", False)
        self.assertEqual(bytes(b"ab") == b"abc", False)
        self.assertEqual(bytes(b"ab") > b"abc", False)
        self.assertEqual(bytes(b"ab") >= b"abc", False)
        self.assertEqual(bytes(b"abc") < b"ab", False)
        self.assertEqual(bytes(b"abc") <= b"ab", False)
    def test_doc(self):
        """Both types carry a docstring that names the constructor."""
        self.assertIsNotNone(bytearray.__doc__)
        self.assertTrue(bytearray.__doc__.startswith("bytearray("), bytearray.__doc__)
        self.assertIsNotNone(bytes.__doc__)
        self.assertTrue(bytes.__doc__.startswith("bytes("), bytes.__doc__)
    def test_from_bytearray(self):
        """bytearray can be constructed from a memoryview of bytes."""
        sample = bytes(b"Hello world\n\x80\x81\xfe\xff")
        buf = memoryview(sample)
        b = bytearray(buf)
        self.assertEqual(b, bytearray(sample))
    @check_bytes_warnings
    def test_to_str(self):
        """str() of bytes/bytearray escapes non-ASCII bytes."""
        self.assertEqual(str(b''), "b''")
        self.assertEqual(str(b'x'), "b'x'")
        self.assertEqual(str(b'\x80'), "b'\\x80'")
        self.assertEqual(str(bytearray(b'')), "bytearray(b'')")
        self.assertEqual(str(bytearray(b'x')), "bytearray(b'x')")
        self.assertEqual(str(bytearray(b'\x80')), "bytearray(b'\\x80')")
    def test_literal(self):
        """b"" literals match latin-1-decoded bytearrays; non-ASCII chars
        inside a b"" literal are a SyntaxError."""
        tests = [
            (b"Wonderful spam", "Wonderful spam"),
            (br"Wonderful spam too", "Wonderful spam too"),
            (b"\xaa\x00\000\200", "\xaa\x00\000\200"),
            (br"\xaa\x00\000\200", r"\xaa\x00\000\200"),
        ]
        for b, s in tests:
            self.assertEqual(b, bytearray(s, 'latin-1'))
        for c in range(128, 256):
            self.assertRaises(SyntaxError, eval,
                              'b"%s"' % chr(c))
    def test_translate(self):
        """translate() maps bytes via a 256-entry table, optionally deleting
        bytes; None means identity mapping; both args None is an error."""
        b = b'hello'
        ba = bytearray(b)
        rosetta = bytearray(range(0, 256))
        rosetta[ord('o')] = ord('e')
        c = b.translate(rosetta, b'l')
        self.assertEqual(b, b'hello')
        self.assertEqual(c, b'hee')
        c = ba.translate(rosetta, b'l')
        self.assertEqual(ba, b'hello')
        self.assertEqual(c, b'hee')
        c = b.translate(None, b'e')
        self.assertEqual(c, b'hllo')
        c = ba.translate(None, b'e')
        self.assertEqual(c, b'hllo')
        self.assertRaises(TypeError, b.translate, None, None)
        self.assertRaises(TypeError, ba.translate, None, None)
    def test_split_bytearray(self):
        """split() accepts a memoryview separator."""
        self.assertEqual(b'a b'.split(memoryview(b' ')), [b'a', b'b'])
    def test_rsplit_bytearray(self):
        """rsplit() accepts a memoryview separator."""
        self.assertEqual(b'a b'.rsplit(memoryview(b' ')), [b'a', b'b'])
# Optimizations:
# __iter__? (optimization)
# __reversed__? (optimization)
# XXX More string methods? (Those that don't use character properties)
# There are tests in string_tests.py that are more
# comprehensive for things like split, partition, etc.
# Unfortunately they are all bundled with tests that
# are not appropriate for bytes
# I've started porting some of those into bytearray_tests.py, we should port
# the rest that make sense (the code can be cleaned up to use modern
# unittest methods at the same time).
class BytearrayPEP3137Test(unittest.TestCase,
                           test.buffer_tests.MixinBytesBufferCommonTests):
    """PEP 3137 checks: methods on a mutable bytearray never return self."""
    def marshal(self, x):
        # Convert shared-mixin test data to the type under test.
        return bytearray(x)
    def test_returns_new_copy(self):
        """Padding/splitting/replacing methods must return a new, equal
        object rather than a reference to the mutable receiver."""
        val = self.marshal(b'1234')
        # On immutable types these MAY return a reference to themselves
        # but on mutable types like bytearray they MUST return a new copy.
        for methname in ('zfill', 'rjust', 'ljust', 'center'):
            method = getattr(val, methname)
            newval = method(3)
            self.assertEqual(val, newval)
            self.assertTrue(val is not newval,
                            methname+' returned self on a mutable object')
        for expr in ('val.split()[0]', 'val.rsplit()[0]',
                     'val.partition(".")[0]', 'val.rpartition(".")[2]',
                     'val.splitlines()[0]', 'val.replace("", "")'):
            newval = eval(expr)
            self.assertEqual(val, newval)
            self.assertTrue(val is not newval,
                            expr+' returned val on a mutable object')
class FixedStringTest(test.string_tests.BaseTest):
    """Run the generic string test suite against a bytes-like type.

    The no-op overrides below disable string-specific tests whose
    semantics do not carry over to bytes.
    """
    def fixtype(self, obj):
        # Encode str fixtures so comparisons happen in bytes space.
        if isinstance(obj, str):
            return obj.encode("utf-8")
        return super(FixedStringTest, self).fixtype(obj)
    # Currently the bytes containment testing uses a single integer
    # value. This may not be the final design, but until then the
    # string-style containment tests below are not valid for bytes.
    def test_contains(self):
        pass
    def test_expandtabs(self):
        pass
    def test_upper(self):
        pass
    def test_lower(self):
        pass
    def test_hash(self):
        # XXX check this out
        pass
class ByteArrayAsStringTest(FixedStringTest):
    """Generic string tests instantiated for bytearray."""
    type2test = bytearray
class ByteArraySubclass(bytearray):
    """Trivial subclass used by ByteArraySubclassTest."""
    pass
class ByteArraySubclassTest(unittest.TestCase):
    """Subclasses of bytearray: comparisons, concat/repeat, join, pickling,
    copying, and __init__ overriding."""
    def test_basic(self):
        """Comparison, concatenation and repetition work across the
        subclass/base boundary."""
        self.assertTrue(issubclass(ByteArraySubclass, bytearray))
        self.assertIsInstance(ByteArraySubclass(), bytearray)
        a, b = b"abcd", b"efgh"
        _a, _b = ByteArraySubclass(a), ByteArraySubclass(b)
        # test comparison operators with subclass instances
        self.assertTrue(_a == _a)
        self.assertTrue(_a != _b)
        self.assertTrue(_a < _b)
        self.assertTrue(_a <= _b)
        self.assertTrue(_b >= _a)
        self.assertTrue(_b > _a)
        self.assertTrue(_a is not a)
        # test concat of subclass instances
        self.assertEqual(a + b, _a + _b)
        self.assertEqual(a + b, a + _b)
        self.assertEqual(a + b, _a + b)
        # test repeat
        self.assertTrue(a*5 == _a*5)
    def test_join(self):
        """join() on/with subclasses returns a plain bytearray, never the
        single input object itself."""
        # Make sure join returns a NEW object for single item sequences
        # involving a subclass.
        # Make sure that it is of the appropriate type.
        s1 = ByteArraySubclass(b"abcd")
        s2 = bytearray().join([s1])
        self.assertTrue(s1 is not s2)
        self.assertTrue(type(s2) is bytearray, type(s2))
        # Test reverse, calling join on subclass
        s3 = s1.join([b"abcd"])
        self.assertTrue(type(s3) is bytearray)
    def test_pickle(self):
        """Pickling round-trips the subclass type and its extra attributes
        for every protocol."""
        a = ByteArraySubclass(b"abcd")
        a.x = 10
        a.y = ByteArraySubclass(b"efgh")
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            b = pickle.loads(pickle.dumps(a, proto))
            self.assertNotEqual(id(a), id(b))
            self.assertEqual(a, b)
            self.assertEqual(a.x, b.x)
            self.assertEqual(a.y, b.y)
            self.assertEqual(type(a), type(b))
            self.assertEqual(type(a.y), type(b.y))
    def test_copy(self):
        """copy.copy/deepcopy preserve the subclass type and attributes."""
        a = ByteArraySubclass(b"abcd")
        a.x = 10
        a.y = ByteArraySubclass(b"efgh")
        for copy_method in (copy.copy, copy.deepcopy):
            b = copy_method(a)
            self.assertNotEqual(id(a), id(b))
            self.assertEqual(a, b)
            self.assertEqual(a.x, b.x)
            self.assertEqual(a.y, b.y)
            self.assertEqual(type(a), type(b))
            self.assertEqual(type(a.y), type(b.y))
    def test_init_override(self):
        """A subclass __init__ with a different signature can still delegate
        to bytearray.__init__; extra positional/keyword args are honored."""
        class subclass(bytearray):
            def __init__(self, newarg=1, *args, **kwargs):
                bytearray.__init__(self, *args, **kwargs)
        x = subclass(4, source=b"abcd")
        self.assertEqual(x, b"abcd")
        x = subclass(newarg=4, source=b"abcd")
        self.assertEqual(x, b"abcd")
def test_main():
    """Run the bytearray-focused suites; the commented-out suites are
    deliberately disabled here."""
    #test.test_support.run_unittest(BytesTest)
    #test.test_support.run_unittest(AssortedBytesTest)
    #test.test_support.run_unittest(BytesAsStringTest)
    test.test_support.run_unittest(
        ByteArrayTest,
        ByteArrayAsStringTest,
        ByteArraySubclassTest,
        BytearrayPEP3137Test)
# Allow running this test module directly.
if __name__ == "__main__":
    test_main()
| {
"content_hash": "9cc15c5aded894a169185a9e96c3d9d0",
"timestamp": "",
"source": "github",
"line_count": 1145,
"max_line_length": 96,
"avg_line_length": 38.61659388646288,
"alnum_prop": 0.5703365297629818,
"repo_name": "p4datasystems/CarnotKE",
"id": "0668d868900b0df18e3c0573046d0527ad6b0a5e",
"size": "44216",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "jyhton/Lib/test/test_bytes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1605"
},
{
"name": "Batchfile",
"bytes": "47777"
},
{
"name": "C",
"bytes": "2514"
},
{
"name": "CSS",
"bytes": "2404"
},
{
"name": "GAP",
"bytes": "130755"
},
{
"name": "Groff",
"bytes": "21"
},
{
"name": "HTML",
"bytes": "3930320"
},
{
"name": "Java",
"bytes": "14582708"
},
{
"name": "Makefile",
"bytes": "2261"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Perl",
"bytes": "19642"
},
{
"name": "Python",
"bytes": "26645929"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Shell",
"bytes": "62509"
},
{
"name": "Visual Basic",
"bytes": "481"
},
{
"name": "XSLT",
"bytes": "436870"
}
],
"symlink_target": ""
} |
"""A simple, hierarchical distributed counter."""
import threading
import time
from typing import Dict, Mapping, Optional, Union
from acme import core
Number = Union[int, float]
class Counter(core.Saveable):
  """A simple counter object that can periodically sync with a parent."""
  def __init__(self,
               parent: Optional['Counter'] = None,
               prefix: str = '',
               time_delta: float = 1.0,
               return_only_prefixed: bool = False):
    """Initialize the counter.
    Args:
      parent: a Counter object to cache locally (or None for no caching).
      prefix: string prefix to use for all local counts.
      time_delta: time difference in seconds between syncing with the parent
        counter.
      return_only_prefixed: if True, and if `prefix` isn't empty, return counts
        restricted to the given `prefix` on each call to `increment` and
        `get_counts`. The `prefix` is stripped from returned count names.
    """
    self._parent = parent
    self._prefix = prefix
    self._time_delta = time_delta
    # Hold local counts and we'll lock around that.
    # These are counts to be synced to the parent and the cache.
    self._counts = {}
    self._lock = threading.Lock()
    # We'll sync periodically (when the last sync was more than self._time_delta
    # seconds ago.)
    self._cache = {}
    self._last_sync_time = 0.0
    self._return_only_prefixed = return_only_prefixed
  def increment(self, **counts: Number) -> Dict[str, Number]:
    """Increment a set of counters.
    Args:
      **counts: keyword arguments specifying count increments.
    Returns:
      The [name, value] mapping of all counters stored, i.e. this will also
      include counts that were not updated by this call to increment.
    """
    with self._lock:
      for key, value in counts.items():
        self._counts.setdefault(key, 0)
        self._counts[key] += value
    # get_counts takes the lock itself, so it must be called outside the
    # `with` block above.
    return self.get_counts()
  def get_counts(self) -> Dict[str, Number]:
    """Return all counts tracked by this counter."""
    now = time.time()
    # TODO(b/144421838): use futures instead of blocking.
    if self._parent and (now - self._last_sync_time) > self._time_delta:
      with self._lock:
        counts = _prefix_keys(self._counts, self._prefix)
        # Reset the local counts, as they will be merged into the parent and the
        # cache.
        self._counts = {}
        self._cache = self._parent.increment(**counts)
        self._last_sync_time = now
    # Potentially prefix the keys in the counts dictionary.
    counts = _prefix_keys(self._counts, self._prefix)
    # If there's no prefix make a copy of the dictionary so we don't modify the
    # internal self._counts.
    if not self._prefix:
      counts = dict(counts)
    # Combine local counts with any parent counts.
    for key, value in self._cache.items():
      counts[key] = counts.get(key, 0) + value
    if self._prefix and self._return_only_prefixed:
      # Strip "{prefix}_" from the keys and drop counts from other prefixes.
      counts = dict([(key[len(self._prefix) + 1:], value)
                     for key, value in counts.items()
                     if key.startswith(f'{self._prefix}_')])
    return counts
  def save(self) -> Mapping[str, Mapping[str, Number]]:
    """Snapshot the un-synced local counts and the cached parent counts."""
    return {'counts': self._counts, 'cache': self._cache}
  def restore(self, state: Mapping[str, Mapping[str, Number]]):
    """Restore counts/cache from a `save()` snapshot."""
    # Force a sync, if necessary, on the next get_counts call.
    self._last_sync_time = 0.
    self._counts = state['counts']
    self._cache = state['cache']
  def get_steps_key(self) -> str:
    """Returns the key to use for steps by this counter."""
    if not self._prefix or self._return_only_prefixed:
      return 'steps'
    return f'{self._prefix}_steps'
def _prefix_keys(dictionary: Dict[str, Number], prefix: str):
"""Return a dictionary with prefixed keys.
Args:
dictionary: dictionary to return a copy of.
prefix: string to use as the prefix.
Returns:
Return a copy of the given dictionary whose keys are replaced by
"{prefix}_{key}". If the prefix is the empty string it returns the given
dictionary unchanged.
"""
if prefix:
dictionary = {f'{prefix}_{k}': v for k, v in dictionary.items()}
return dictionary
| {
"content_hash": "acbd0790c0bb03f28adf06010749c57c",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 80,
"avg_line_length": 33.728,
"alnum_prop": 0.6416034155597723,
"repo_name": "deepmind/acme",
"id": "8492b3d45688ecfcadcd2a4c974da67e499dcee1",
"size": "4832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "acme/utils/counting.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2182865"
},
{
"name": "Shell",
"bytes": "2668"
}
],
"symlink_target": ""
} |
"""
tests.components.sensor.command_sensor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests command sensor.
"""
import unittest
import homeassistant.core as ha
from homeassistant.components.sensor import command_sensor
class TestCommandSensorSensor(unittest.TestCase):
    """ Test the command-line sensor platform. """
    def setUp(self):
        # Fresh Home Assistant core instance per test.
        self.hass = ha.HomeAssistant()
    def tearDown(self):
        """ Stop down stuff we started. """
        self.hass.stop()
    def test_setup(self):
        """ Test sensor setup """
        config = {'name': 'Test',
                  'unit_of_measurement': 'in',
                  'command': 'echo 5'}
        devices = []
        def add_dev_callback(devs):
            """ callback to add device """
            for dev in devs:
                devices.append(dev)
        command_sensor.setup_platform(
            self.hass, config, add_dev_callback)
        # Exactly one entity, configured from the dict above; state comes
        # from running the shell command.
        self.assertEqual(1, len(devices))
        entity = devices[0]
        self.assertEqual('Test', entity.name)
        self.assertEqual('in', entity.unit_of_measurement)
        self.assertEqual('5', entity.state)
    def test_setup_bad_config(self):
        """ Test setup with a bad config """
        config = {}
        devices = []
        def add_dev_callback(devs):
            """ callback to add device """
            for dev in devs:
                devices.append(dev)
        # Setup must fail cleanly and add no devices.
        self.assertFalse(command_sensor.setup_platform(
            self.hass, config, add_dev_callback))
        self.assertEqual(0, len(devices))
    def test_template(self):
        """ Test command sensor with template """
        data = command_sensor.CommandSensorData('echo 50')
        entity = command_sensor.CommandSensor(
            self.hass, data, 'test', 'in', '{{ value | multiply(0.1) }}')
        self.assertEqual(5, float(entity.state))
    def test_bad_command(self):
        """ Test bad command """
        # A nonexistent command leaves the data value unset.
        data = command_sensor.CommandSensorData('asdfasdf')
        data.update()
        self.assertEqual(None, data.value)
| {
"content_hash": "94aa94202a5d0332f1ae27395374d8d1",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 73,
"avg_line_length": 27.16,
"alnum_prop": 0.5689739813451153,
"repo_name": "Theb-1/home-assistant",
"id": "ae6c9452d3f9067963dbf5c10656ea13acd840d7",
"size": "2037",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/sensor/test_command_sensor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1474810"
},
{
"name": "Python",
"bytes": "1660180"
},
{
"name": "Shell",
"bytes": "4592"
}
],
"symlink_target": ""
} |
import unittest
from pythonbacktest.indicatorcalculator import IndicatorsCalculatorPerfMonitor
class IndicatorsCalculatorPerfMonitorTests(unittest.TestCase):
    """Aggregated execution-time statistics reported per indicator."""
    sample_indicator_name_1 = "indicator_1"
    sample_indicator_name_2 = "indicator_2"
    def test_no_data(self):
        """No reported timings -> empty performance stats."""
        perf_monitor = IndicatorsCalculatorPerfMonitor()
        performance_stats = perf_monitor.performance_stats
        self.assertFalse(list(performance_stats))
    def test_single_indicator(self):
        """Stats for one indicator over four samples."""
        sample_data = [1, 2, 3, 4]
        perf_monitor = IndicatorsCalculatorPerfMonitor()
        for single_data in sample_data:
            perf_monitor.report_execution_time(self.sample_indicator_name_1, single_data)
        performance_stats = list(perf_monitor.performance_stats)
        # 5-tuple looks like (mean, min, max, median, stdev) -- confirm
        # against the IndicatorsCalculatorPerfMonitor implementation.
        self.assertEqual((self.sample_indicator_name_1, (2.5, 1, 4, 2.5, 1.1180339887498949)), performance_stats[0])
    def test_two_indicators(self):
        """Interleaved reports keep per-indicator stats separate."""
        sample_data_1 = [5, 5, 5, 5]
        sample_data_2 = [5, 6, 7, 8]
        perf_monitor = IndicatorsCalculatorPerfMonitor()
        for record_1, record_2 in zip(sample_data_1, sample_data_2):
            perf_monitor.report_execution_time(self.sample_indicator_name_1, record_1)
            perf_monitor.report_execution_time(self.sample_indicator_name_2, record_2)
        performance_stats = list(perf_monitor.performance_stats)
        # Same 5-tuple shape as above; constant samples give zero spread.
        self.assertEqual((self.sample_indicator_name_1, (5, 5, 5, 5, 0)), performance_stats[0])
        self.assertEqual((self.sample_indicator_name_2, (6.5, 5, 8, 6.5, 1.1180339887498949)), performance_stats[1])
| {
"content_hash": "ca4a885713aa2bb4cdda981edd86e600",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 116,
"avg_line_length": 38.02325581395349,
"alnum_prop": 0.6764525993883792,
"repo_name": "quantwizard-com/pythonbacktest",
"id": "77a7f085418151a0b89da9c908778859b774b9e3",
"size": "1635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testcases/indicatorcalculator_tests/indicatorscalculatorperfmonitortests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "340439"
},
{
"name": "Python",
"bytes": "144332"
},
{
"name": "Shell",
"bytes": "77"
}
],
"symlink_target": ""
} |
import unittest
from streamlink.plugins.metube import MeTube
class TestPluginMeTube(unittest.TestCase):
    """URL-matching tests for the MeTube plugin."""

    _SHOULD_MATCH = (
        'https://www.metube.id/live/METROTV',
        'https://www.metube.id/live/GTV',
        'https://www.metube.id/videos/16881738/yudi_28_bogor_-amazingakak',
        'https://www.metube.id/videos/16873428/liverpool-vs-psg-3-2-goals-and-highlights-2018',
    )
    _SHOULD_NOT_MATCH = (
        'https://www.metube.id/me/IMAA2018',
        'https://www.metube.id/auditions',
        'https://www.twitch.tv/twitch',
    )

    def test_can_handle_url(self):
        """Live and video URLs must be claimed by the plugin."""
        for url in self._SHOULD_MATCH:
            self.assertTrue(MeTube.can_handle_url(url))

    def test_can_handle_url_negative(self):
        """Non-stream pages and foreign hosts must be rejected."""
        for url in self._SHOULD_NOT_MATCH:
            self.assertFalse(MeTube.can_handle_url(url))
| {
"content_hash": "af4d30e72ab45e72d4030c63ddcce5d4",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 99,
"avg_line_length": 35.791666666666664,
"alnum_prop": 0.610011641443539,
"repo_name": "beardypig/streamlink",
"id": "0848dac8fde35e25e23a96afe2aaebf0fd9e424e",
"size": "859",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/plugins/test_metube.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Python",
"bytes": "1538432"
},
{
"name": "Shell",
"bytes": "18707"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial blog schema: creates the BlogPost table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='BlogPost',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=150)),
                ('body', models.TextField()),
                ('timestamp', models.DateTimeField()),
            ],
        ),
    ]
| {
"content_hash": "66ccaf875ddddfcefd45ca5701e641b8",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 114,
"avg_line_length": 25.304347826086957,
"alnum_prop": 0.5532646048109966,
"repo_name": "mude918/aws_site01",
"id": "09d99000077e7f9f1441e76723623e51f284b845",
"size": "655",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "1595"
},
{
"name": "CSS",
"bytes": "274088"
},
{
"name": "HTML",
"bytes": "168138"
},
{
"name": "Java",
"bytes": "606"
},
{
"name": "JavaScript",
"bytes": "1740798"
},
{
"name": "PHP",
"bytes": "19961"
},
{
"name": "Python",
"bytes": "19722"
},
{
"name": "Shell",
"bytes": "247"
}
],
"symlink_target": ""
} |
import psycopg2
import sys
def time_it(fn):
    # Decorator: print the wall-clock duration of each call (Python 2
    # print statement -- this module is Python 2 only).
    # NOTE(review): the wrapper discards fn's return value and does not
    # forward keyword arguments.
    def wrapped(*args):
        import time
        start = time.time()
        fn(*args)
        used = time.time() - start
        print "%s used %s" % (str(fn), used)
    return wrapped
@time_it
def matrix_vector_multiply(conn, m1, m2, result):
    # Sparse matrix-vector product in SQL: result[row] = sum_col m1[row,col] * m2[col].
    # ``result`` is cleared first, then refilled in a single aggregate insert.
    # NOTE(review): table names are %-interpolated into the SQL; callers must
    # pass trusted identifiers only.
    cur = conn.cursor()
    cur.execute("delete from %s" % result)
    cur.execute("insert into %s select A.row, sum(A.val * B.val) from %s as A, %s as B where A.col = B.id group by A.row" % (result, m1, m2))
    conn.commit()
    cur.close()
def create_matrix(conn, table_name):
    """(Re)create a sparse-matrix table with (row, col, val) columns.

    NOTE(review): ``table_name`` is interpolated into the SQL; pass trusted
    identifiers only.
    """
    cursor = conn.cursor()
    drop_if_exists(conn, table_name)
    cursor.execute("create table %s (row int, col int, val float)" % table_name)
    conn.commit()
    cursor.close()
def drop_if_exists(conn, table_name):
    """Drop ``table_name`` from the database if present, then commit.

    NOTE(review): the identifier is %-interpolated into the SQL; trusted
    names only.
    """
    statement = "drop table if exists %s" % table_name
    cursor = conn.cursor()
    cursor.execute(statement)
    conn.commit()
    cursor.close()
def out_degree(conn, edge_table, degree_table, weighted):
    """Materialize per-source out-degrees of ``edge_table`` into ``degree_table``.

    When ``weighted`` the degree is sum(weight); otherwise it is the edge count.
    The target table is recreated on every call.
    """
    cursor = conn.cursor()
    drop_if_exists(conn, degree_table)
    cursor.execute("create table %s(id int, degree int)" % degree_table)
    # Preserve the original `== False` test (only an explicit False/0 selects
    # the unweighted branch).
    if weighted == False:
        aggregate = "count(*)"
    else:
        aggregate = "sum(weight)"
    cursor.execute("insert into %s select src_id, %s from %s group by src_id" % (degree_table, aggregate, edge_table))
    conn.commit()
    cursor.close()
@time_it
def vector_add(conn, m1, m2):
    '''
    result is updated to m1
    '''
    # Element-wise vector add via a scratch table: tmp = m1 + m2 joined on id,
    # then m1's rows are replaced by tmp's.  Assumes a table named ``tmp``
    # already exists (created in compute_bp).
    cur = conn.cursor()
    #cur.execute('update %s set val = A.val + B.val from %s as A, %s as B where A.id = B.id' %(m1, m1, m2))
    cur.execute('delete from tmp');
    cur.execute('insert into tmp select A.id, A.val + B.val from %s as A, %s as B where A.id = B.id' % (m1, m2))
    cur.execute('delete from %s' % m1)
    cur.execute('insert into %s select * from tmp' % m1)
    conn.commit()
    cur.close()
def create_vector(conn, tbl_name):
    """(Re)create a vector table ``tbl_name`` with (id, val) columns.

    Fix: the original never closed its cursor; every sibling helper in this
    module closes the cursor after committing, so do the same here.
    NOTE(review): ``tbl_name`` is %-interpolated into the SQL; trusted
    identifiers only.
    """
    cur = conn.cursor()
    drop_if_exists(conn, tbl_name)
    cur.execute('create table %s(id int, val float)' % tbl_name)
    conn.commit()
    cur.close()
@time_it
def rand_init_matrix(conn, matrix, edge_table):
    # Despite the name, this fills a *vector* table (id, val): one row per
    # distinct source node, with a random prior produced by the SQL function
    # rnd_prior() (installed by create_rnd_init).
    cur = conn.cursor()
    cur.execute('insert into %s select src_id, rnd_prior() from %s group by src_id' % (matrix, edge_table))
    conn.commit()
def is_stablized(conn, belief, belief_new, threshold = 0.000001):
    """Convergence check: True when the Euclidean (L2) distance between the
    two belief vectors is below ``threshold``.

    Fix: the original query ended with ``group by A.id``, producing one row
    per node (each holding |B.val - A.val| for that node); fetchone() then
    compared an arbitrary single node's delta instead of the global norm.
    The GROUP BY is removed so the aggregate spans the whole vector.
    (The historical 'stablized' spelling is kept because compute_bp calls
    the function by this name.)
    """
    cur = conn.cursor()
    cur.execute('select sqrt(sum(power(B.val - A.val, 2))) from %s as A, %s as B where A.id = B.id' % (belief, belief_new))
    diff = cur.fetchone()[0]
    cur.close()
    return diff < threshold
def create_rnd_init(conn):
    # Install the PL/pgSQL function rnd_prior(): returns +0.001 with ~5%
    # probability, -0.001 with ~5% probability, and 0 otherwise.  Used by
    # rand_init_matrix to seed random priors.
    fcn_def = \
    """
    CREATE or REPLACE function rnd_prior()
    returns float
    AS
    $$
    DECLARE
        retval float := 0;
        rnd_val float;
    BEGIN
        rnd_val = random();
        if rnd_val > 0.95 then
            retval = 0.001;
        elsif rnd_val < 0.05 then
            retval = -0.001;
        end if;
        return retval;
    END;
    $$
    language plpgsql
    """
    cur = conn.cursor()
    cur.execute(fcn_def)
    conn.commit()
def summarize(conn, final_belief):
    # Print counts of positive / zero / negative beliefs (Python 2 print
    # statements).
    cur = conn.cursor()
    cur.execute("select count(*) from %s where val > 0" % final_belief)
    pos = cur.fetchone()[0]
    cur.execute("select count(*) from %s where val = 0" % final_belief)
    zero = cur.fetchone()[0]
    cur.execute("select count(*) from %s where val < 0" % final_belief)
    neg = cur.fetchone()[0]
    print "postive: %d" % pos
    print "zero: %d" % zero
    print "negative: %d" % neg
def compute_bp(conn, edge_table, target_table, weighted = False):
    # Linearized belief propagation (FaBP-style): iterates
    # belief <- W * belief + prior, where W = c*A - a*D (A adjacency,
    # D out-degree diagonal), until the belief vector stabilizes or
    # max_iteration is reached.  Results land in table "bp_<target_table>".
    # NOTE(review): all table names are %-interpolated into SQL; trusted
    # identifiers only.  Module is Python 2 (print statements below).
    create_rnd_init(conn);
    cur = conn.cursor()
    # h is the homophily factor; a and c follow the linearized-BP closed form.
    h = 0.002
    a = 4.0 * (h ** 2) / (1 - 4 * h ** 2)
    c = 2.0 * h / (1 - 4 * h ** 2)
    W = "W"
    #W_new = "W_new"
    prior = "prior"
    belief = "bp_" + target_table
    belief_new = "belief_new"
    degree_table = "degree_table"
    out_degree(conn, edge_table, degree_table, weighted)
    create_matrix(conn, W)
    #create_matrix(conn, W_new)
    create_vector(conn, belief)
    create_vector(conn, belief_new)
    create_vector(conn, prior)
    # Scratch table used by vector_add.
    create_vector(conn, 'tmp')
    rand_init_matrix(conn, prior, edge_table)
    # Off-diagonal entries of W: c * (weighted) adjacency.
    if weighted == True:
        cur.execute("insert into %s select src_id, dst_id, weight * %f from %s" % (W, c, edge_table))
    else:
        cur.execute("insert into %s select src_id, dst_id, %f from %s" % (W, c, edge_table))
    conn.commit()
    # Diagonal entries: -a * out-degree.
    cur.execute("insert into %s select id, id, -%f * degree from %s" % (W, a, degree_table))
    cur.execute("drop index if exists w_index")
    cur.execute("create index w_index on %s(col)" % W)
    cur.execute("drop index if exists prior_index");
    cur.execute("create index prior_index on %s(id)" % prior)
    conn.commit()
    print "initialized"
    # belief_0 = W * prior, then iterate belief <- W * belief + prior.
    matrix_vector_multiply(conn, W, prior, belief)
    max_iteration = 10
    for i in range(max_iteration):
        print "iteration %d" % i
        #cur.execute("delete from %s" % W_new)
        matrix_vector_multiply(conn, W, belief, belief_new)
        vector_add(conn, belief_new, prior)
        if(is_stablized(conn, belief, belief_new)):
            break
        cur.execute("delete from %s" % belief)
        cur.execute("insert into %s select * from %s" % (belief, belief_new))
        conn.commit()
    summarize(conn, belief)
if __name__ == "__main__":
    # Usage: bp.py <edge_table> <result_suffix>; connects to local "mydb".
    conn = psycopg2.connect(database="mydb", host="127.0.0.1")
    compute_bp(conn, sys.argv[1], sys.argv[2])
    conn.close()
| {
"content_hash": "c2306f39c8511b5a295e08d86c74be51",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 138,
"avg_line_length": 27.933701657458563,
"alnum_prop": 0.6481408227848101,
"repo_name": "spininertia/graph-mining-rdbms",
"id": "8c0560b905a9ddbb091e1cc588661175318e74c0",
"size": "5056",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/phase-3/src/bp/bp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106216"
},
{
"name": "Shell",
"bytes": "383"
}
],
"symlink_target": ""
} |
""" Core functions of the updater. """
import os
import re
import zipfile
import psutil
import sys
from . import helpers
# Authorship information.
__author__ = "Sébastien Mathieu"
__version__ = "1.0.0"
def create_archive(path, output_path=None, ignore_list=None, package_name=None, to_remove=None,
                   compression=zipfile.ZIP_DEFLATED):
    """ Create a zip archive from the content of a folder.

    :param path: Path to the folder to create archive from.
    :param output_path: Path to the output folder.
    :param ignore_list: List of regular expression pattern applied to the full path.
    :param package_name: Name of the package to create.
    :param to_remove: List of file names to record in the removal manifest.
    :param compression: Compression of the archive used by the python zipfile class.
    :type path: str
    :type output_path: str
    :type ignore_list: iterable
    :type package_name: str
    :type to_remove: iterable
    :type compression: constant
    """
    # Normalize the ignore list and pre-compile its patterns.
    if ignore_list is None:
        ignore_list = []
    patterns = [re.compile(pattern) for pattern in ignore_list]

    # Default package name is derived from the folder being archived.
    if package_name is None:
        package_name = "%s.zip" % os.path.basename(path)
    package_name = os.path.normpath(package_name)

    # Archive entries are named relative to the parent of `path`.
    base_path = os.path.dirname(path)

    # Determine where the archive is written and drop any stale copy.
    if output_path is None:
        archive_path = package_name
    else:
        archive_path = os.path.join(output_path, package_name)
    if os.path.isfile(archive_path):
        os.remove(archive_path)

    with zipfile.ZipFile(archive_path, 'w', compression=compression) as archive:
        # Walk the source tree and add every file that is not filtered out.
        for folder, _, file_names in os.walk(path):
            for file_name in file_names:
                full_path = os.path.join(folder, file_name)
                if any(regex.search(full_path) for regex in patterns):
                    continue  # matched by the ignore list
                archive.write(full_path, os.path.relpath(full_path, base_path))

        # Embed the removal manifest, when one was requested.
        if to_remove is not None:
            archive.writestr(helpers.TO_REMOVE_FILE, "\n".join(to_remove))

        # Embed the ignore list so later backups can reuse it.
        if len(ignore_list) > 0:
            archive.writestr(helpers.IGNORE_LIST_FILE, "\n".join(ignore_list))
def apply_archive(in_path, out_path, backup_path='.'):
    """ Apply an update archive given by its file path.

    :param in_path: Input archive path.
    :param out_path: Output directory path.
    :param backup_path: Path of the backup archive. If None, no backup is performed.
    :type in_path: str
    :type out_path: str
    :type backup_path: str
    """
    # Open the archive and delegate to apply_zipfile. The previous version
    # recursively called apply_archive here, which looped forever; the
    # ZipFile-based implementation was clearly the intended callee.
    with zipfile.ZipFile(in_path) as archive:
        apply_zipfile(archive, out_path, backup_path)
def apply_zipfile(archive, out_path, backup_path='.'):
    """ Apply an update archive.

    :param archive: Input archive.
    :param out_path: Output directory path.
    :param backup_path: Path of the backup archive. If None, no backup is performed.
    :type archive: zipfile.ZipFile
    :type out_path: str
    :type backup_path: str
    """
    if backup_path is not None:
        _backup(archive, out_path, backup_path)

    # Extract everything except the updater's own bookkeeping files.
    archive.extractall(out_path, filter(lambda n: n not in helpers.RESERVED_FILES, archive.namelist()))

    # Remove the files listed in the removal manifest, if the archive has one.
    try:
        to_remove = archive.read(helpers.TO_REMOVE_FILE).decode().split('\n')
    except KeyError:  # No manifest: nothing to remove
        return
    for name in to_remove:
        # A trailing newline in the manifest yields an empty entry; removing
        # '%s/' would previously target the output directory itself and crash.
        if not name:
            continue
        try:
            os.remove(os.path.join(out_path, name))
        except FileNotFoundError:
            pass  # File already removed
def _backup(archive, out_path, backup_path):
    """ Backup the target directory before an update archive is applied.

    :param archive: Input archive (consulted for its embedded ignore list).
    :param out_path: Target output path to backup.
    :param backup_path: Path where to write the backup archive.
    :type archive: zipfile.ZipFile
    :type out_path: str
    :type backup_path: str
    """
    # Reuse the ignore list the update archive ships with, when present.
    try:
        raw_list = archive.read(helpers.IGNORE_LIST_FILE)
    except KeyError:
        ignore_list = None  # Archive provides no ignore list
    else:
        ignore_list = raw_list.decode().split('\n')

    # Snapshot the target into backup.zip inside the backup folder.
    create_archive(out_path, output_path=backup_path, ignore_list=ignore_list, package_name='backup.zip')
def restart_program():
    """Restart the current program, cleaning up open file objects and
    descriptors first.

    Source: https://stackoverflow.com/questions/11329917/restart-python-script-from-within-itself
    Original author: s3ni0r
    """
    # Best-effort cleanup: close every file and socket descriptor this
    # process holds; failures are reported but never block the restart.
    try:
        current = psutil.Process(os.getpid())
        for handler in current.open_files() + current.connections():
            os.close(handler.fd)
    except Exception as error:
        print(error, file=sys.stderr)

    # Replace the running process with a fresh interpreter on the same
    # script and arguments.
    interpreter = sys.executable
    os.execl(interpreter, interpreter, *sys.argv)
| {
"content_hash": "2d119caa1a407391f9c92b453b925d1e",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 105,
"avg_line_length": 32.72549019607843,
"alnum_prop": 0.6351108448172559,
"repo_name": "sebMathieu/crude-user-updater",
"id": "36506787ca5631e9f57ca0a0f92caf98b67d7451",
"size": "5032",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "updater/core.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "63"
},
{
"name": "Python",
"bytes": "8203"
}
],
"symlink_target": ""
} |
"""
This module defines the atom types that are available for representing
molecular functional groups and substructure patterns. Each available atom type
is defined as an instance of the :class:`AtomType` class. The atom types
themselves are available in the ``atomTypes`` module-level variable, or as
the return value from the :meth:`getAtomType()` method.
If you want to change which atom types are available in RMG and/or what they
represent, this should be the only module you need to change to do so.
"""
import cython
################################################################################
class AtomTypeError(Exception):
    """
    Raised when an error occurs while working with atom types. The message
    passed to the constructor should describe the circumstances that caused
    the exceptional behavior.
    """
################################################################################
class AtomType:
    """
    Internal representation of an atom type.

    Using unique objects rather than strings allows fast pointer comparisons
    instead of slow string comparisons, and lets each atom type carry extra
    metadata: its position in the atom-type hierarchy (`generic` and
    `specific`) and, for each action that can involve it, the atom type(s)
    that can result. The attributes are:

    =================== =================== ====================================
    Attribute           Type                Description
    =================== =================== ====================================
    `label`             ``str``             A unique label for the atom type
    `generic`           ``list``            The atom types that are more generic than this one
    `specific`          ``list``            The atom types that are more specific than this one
    `incrementBond`     ``list``            Result(s) when an adjacent bond's order is incremented
    `decrementBond`     ``list``            Result(s) when an adjacent bond's order is decremented
    `formBond`          ``list``            Result(s) when a new single bond is formed to this atom type
    `breakBond`         ``list``            Result(s) when an existing single bond to this atom type is broken
    `incrementRadical`  ``list``            Result(s) when the number of radical electrons is incremented
    `decrementRadical`  ``list``            Result(s) when the number of radical electrons is decremented
    `incrementLonePair` ``list``            Result(s) when the number of lone electron pairs is incremented
    `decrementLonePair` ``list``            Result(s) when the number of lone electron pairs is decremented
    =================== =================== ====================================
    """

    # All attributes that participate in pickling, in a fixed order; the
    # last eight are the action-result lists filled in by setActions().
    _STATE_ATTRS = ('label', 'generic', 'specific',
                    'incrementBond', 'decrementBond', 'formBond', 'breakBond',
                    'incrementRadical', 'decrementRadical',
                    'incrementLonePair', 'decrementLonePair')

    def __init__(self, label='', generic=None, specific=None):
        self.label = label
        self.generic = generic or []
        self.specific = specific or []
        # Action-result lists start empty; setActions() populates them.
        for attr in self._STATE_ATTRS[3:]:
            setattr(self, attr, [])

    def __repr__(self):
        return '<AtomType "%s">' % self.label

    def __reduce__(self):
        """
        A helper function used when pickling an AtomType object.
        """
        state = dict((attr, getattr(self, attr)) for attr in self._STATE_ATTRS)
        return (AtomType, (), state)

    def __setstate__(self, d):
        """
        A helper function used when unpickling an AtomType object.
        """
        for attr in self._STATE_ATTRS:
            setattr(self, attr, d[attr])

    def setActions(self, incrementBond, decrementBond, formBond, breakBond,
                   incrementRadical, decrementRadical, incrementLonePair,
                   decrementLonePair):
        """Store the atom type(s) resulting from each of the eight actions."""
        self.incrementBond = incrementBond
        self.decrementBond = decrementBond
        self.formBond = formBond
        self.breakBond = breakBond
        self.incrementRadical = incrementRadical
        self.decrementRadical = decrementRadical
        self.incrementLonePair = incrementLonePair
        self.decrementLonePair = decrementLonePair

    def equivalent(self, other):
        """
        Return ``True`` if this atom type and `other` are equivalent, i.e.
        they are identical or one is a specific case of the other; ``False``
        otherwise. This respects wildcards, e.g. ``R!H`` is equivalent to
        ``C``.
        """
        if self is other:
            return True
        return self in other.specific or other in self.specific

    def isSpecificCaseOf(self, other):
        """
        Return ``True`` if this atom type is `other` itself or a specific
        case of `other`, and ``False`` otherwise.
        """
        return other is self or self in other.specific
################################################################################
"""
Note: function to read adjacency lists assumes that all atom types begin
with a capital letter [A-Z]
"""
# Master registry of all atom types, keyed by label. The hierarchy lists
# (generic/specific) and the action lists below are given as string labels;
# they are resolved into AtomType object references by the loop that follows
# these definitions.
atomTypes = {}

# Generic wildcard types: any atom (R), any heavy atom (R!H), and the
# valence-based groupings Val4-Val7.
atomTypes['R'] = AtomType(label='R', generic=[], specific=[
    'R!H',
    'Val4','Val5','Val6','Val7',
    'H','He',
    'C','Cs','Cd','Cdd','Ct','CO','Cb','Cbf','CS',
    'N','N1d','N3s','N3d','N3t','N3b','N5s','N5d','N5dd','N5t','N5b',
    'O','Os','Od','Oa','Ot',
    'Ne',
    'Si','Sis','Sid','Sidd','Sit','SiO','Sib','Sibf',
    'S','Ss','Sd','Sa',
    'Cl','Ar']
    )
atomTypes['R!H'] = AtomType(label='R!H', generic=['R'], specific=[
    'He',
    'Val4','Val5','Val6','Val7',
    'C','Cs','Cd','Cdd','Ct','CO','Cb','Cbf','CS',
    'N','N1d','N3s','N3d','N3t','N3b','N5s','N5d','N5dd','N5t','N5b',
    'O','Os','Od','Oa','Ot',
    'Ne',
    'Si','Sis','Sid','Sidd','Sit','SiO','Sib','Sibf',
    'S','Ss','Sd','Sa',
    'Cl','Ar']
    )
atomTypes['Val4'] = AtomType(label='Val4', generic=['R','R!H'], specific=[
    'C','Cs','Cd','Cdd','Ct','CO','Cb','Cbf','CS',
    'Si','Sis','Sid','Sidd','Sit','SiO','Sib','Sibf']
    )
atomTypes['Val5'] = AtomType(label='Val5', generic=['R','R!H'], specific=[
    'N','N1d','N3s','N3d','N3t','N3b','N5s','N5d','N5dd','N5t','N5b']
    )
atomTypes['Val6'] = AtomType(label='Val6', generic=['R','R!H'], specific=[
    'O','Os','Od','Oa','Ot',
    'S','Ss','Sd','Sa']
    )
atomTypes['Val7'] = AtomType(label='Val7', generic=['R','R!H'], specific=[
    'Cl']
    )

# Element-specific atom types. Each element's generic entry (e.g. 'C') lists
# its bonding-specific variants (e.g. 'Cs' single-bonded, 'Cd' one double
# bond, 'Ct' triple, 'Cb'/'Cbf' benzene, ...).
atomTypes['H'   ] = AtomType('H',    generic=['R'],            specific=[])
atomTypes['He'  ] = AtomType('He',   generic=['R','R!H'],      specific=[])
atomTypes['C'   ] = AtomType('C',    generic=['R','R!H','Val4'],      specific=['Cs','Cd','Cdd','Ct','CO','Cb','Cbf','CS'])
atomTypes['Cs'  ] = AtomType('Cs',   generic=['R','R!H','C','Val4'],  specific=[])
atomTypes['Cd'  ] = AtomType('Cd',   generic=['R','R!H','C','Val4'],  specific=[])
atomTypes['Cdd' ] = AtomType('Cdd',  generic=['R','R!H','C','Val4'],  specific=[])
atomTypes['Ct'  ] = AtomType('Ct',   generic=['R','R!H','C','Val4'],  specific=[])
atomTypes['CO'  ] = AtomType('CO',   generic=['R','R!H','C','Val4'],  specific=[])
atomTypes['Cb'  ] = AtomType('Cb',   generic=['R','R!H','C','Val4'],  specific=[])
atomTypes['Cbf' ] = AtomType('Cbf',  generic=['R','R!H','C','Val4'],  specific=[])
atomTypes['CS'  ] = AtomType('CS',   generic=['R','R!H','C','Val4'],  specific=[])
atomTypes['N'   ] = AtomType('N',    generic=['R','R!H','Val5'],      specific=['N1d','N3s','N3d','N3t','N3b','N5s','N5d','N5dd','N5t','N5b'])
atomTypes['N1d' ] = AtomType('N1d',  generic=['R','R!H','N','Val5'],  specific=[])
atomTypes['N3s' ] = AtomType('N3s',  generic=['R','R!H','N','Val5'],  specific=[])
atomTypes['N3d' ] = AtomType('N3d',  generic=['R','R!H','N','Val5'],  specific=[])
atomTypes['N3t' ] = AtomType('N3t',  generic=['R','R!H','N','Val5'],  specific=[])
atomTypes['N3b' ] = AtomType('N3b',  generic=['R','R!H','N','Val5'],  specific=[])
atomTypes['N5s' ] = AtomType('N5s',  generic=['R','R!H','N','Val5'],  specific=[])
atomTypes['N5d' ] = AtomType('N5d',  generic=['R','R!H','N','Val5'],  specific=[])
atomTypes['N5dd'] = AtomType('N5dd', generic=['R','R!H','N','Val5'],  specific=[])
atomTypes['N5t' ] = AtomType('N5t',  generic=['R','R!H','N','Val5'],  specific=[])
atomTypes['N5b' ] = AtomType('N5b',  generic=['R','R!H','N','Val5'],  specific=[])
atomTypes['O'   ] = AtomType('O',    generic=['R','R!H','Val6'],      specific=['Os','Od','Oa','Ot'])
atomTypes['Os'  ] = AtomType('Os',   generic=['R','R!H','O','Val6'],  specific=[])
atomTypes['Od'  ] = AtomType('Od',   generic=['R','R!H','O','Val6'],  specific=[])
atomTypes['Oa'  ] = AtomType('Oa',   generic=['R','R!H','O','Val6'],  specific=[])
atomTypes['Ot'  ] = AtomType('Ot',   generic=['R','R!H','O','Val6'],  specific=[])
atomTypes['Ne'  ] = AtomType('Ne',   generic=['R','R!H'],      specific=[])
atomTypes['Si'  ] = AtomType('Si',   generic=['R','R!H','Val4'],      specific=['Sis','Sid','Sidd','Sit','SiO','Sib','Sibf'])
atomTypes['Sis' ] = AtomType('Sis',  generic=['R','R!H','Si','Val4'], specific=[])
atomTypes['Sid' ] = AtomType('Sid',  generic=['R','R!H','Si','Val4'], specific=[])
atomTypes['Sidd'] = AtomType('Sidd', generic=['R','R!H','Si','Val4'], specific=[])
atomTypes['Sit' ] = AtomType('Sit',  generic=['R','R!H','Si','Val4'], specific=[])
atomTypes['SiO' ] = AtomType('SiO',  generic=['R','R!H','Si','Val4'], specific=[])
atomTypes['Sib' ] = AtomType('Sib',  generic=['R','R!H','Si','Val4'], specific=[])
atomTypes['Sibf'] = AtomType('Sibf', generic=['R','R!H','Si','Val4'], specific=[])
atomTypes['S'   ] = AtomType('S',    generic=['R','R!H','Val6'],      specific=['Ss','Sd','Sa'])
atomTypes['Ss'  ] = AtomType('Ss',   generic=['R','R!H','S','Val6'],  specific=[])
atomTypes['Sd'  ] = AtomType('Sd',   generic=['R','R!H','S','Val6'],  specific=[])
atomTypes['Sa'  ] = AtomType('Sa',   generic=['R','R!H','S','Val6'],  specific=[])
atomTypes['Cl'  ] = AtomType('Cl',   generic=['R','R!H','Val7'],      specific=[])
atomTypes['Ar'  ] = AtomType('Ar',   generic=['R','R!H'],      specific=[])

# For each atom type, list the atom type(s) that can result from each of the
# eight group-modification actions; an empty list means the action is not
# allowed for that atom type.
atomTypes['R'   ].setActions(incrementBond=['R'],            decrementBond=['R'],         formBond=['R'],         breakBond=['R'],         incrementRadical=['R'],    decrementRadical=['R'],    incrementLonePair=['R'],   decrementLonePair=['R'])
atomTypes['R!H' ].setActions(incrementBond=['R!H'],          decrementBond=['R!H'],       formBond=['R!H'],       breakBond=['R!H'],       incrementRadical=['R!H'],  decrementRadical=['R!H'],  incrementLonePair=['R!H'], decrementLonePair=['R!H'])
atomTypes['Val4'].setActions(incrementBond=['Val4'],         decrementBond=['Val4'],      formBond=['Val4'],      breakBond=['Val4'],      incrementRadical=['Val4'], decrementRadical=['Val4'], incrementLonePair=['Val4'],decrementLonePair=['Val4'])
atomTypes['Val5'].setActions(incrementBond=['Val5'],         decrementBond=['Val5'],      formBond=['Val5'],      breakBond=['Val5'],      incrementRadical=['Val5'], decrementRadical=['Val5'], incrementLonePair=['Val5'],decrementLonePair=['Val5'])
atomTypes['Val6'].setActions(incrementBond=['Val6'],         decrementBond=['Val6'],      formBond=['Val6'],      breakBond=['Val6'],      incrementRadical=['Val6'], decrementRadical=['Val6'], incrementLonePair=['Val6'],decrementLonePair=['Val6'])
atomTypes['Val7'].setActions(incrementBond=['Val7'],         decrementBond=['Val7'],      formBond=['Val7'],      breakBond=['Val7'],      incrementRadical=['Val7'], decrementRadical=['Val7'], incrementLonePair=['Val7'],decrementLonePair=['Val7'])
atomTypes['H'   ].setActions(incrementBond=[],               decrementBond=[],            formBond=['H'],         breakBond=['H'],         incrementRadical=['H'],    decrementRadical=['H'],    incrementLonePair=[],      decrementLonePair=[])
atomTypes['He'  ].setActions(incrementBond=[],               decrementBond=[],            formBond=[],            breakBond=[],            incrementRadical=['He'],   decrementRadical=['He'],   incrementLonePair=[],      decrementLonePair=[])
atomTypes['C'   ].setActions(incrementBond=['C'],            decrementBond=['C'],         formBond=['C'],         breakBond=['C'],         incrementRadical=['C'],    decrementRadical=['C'],    incrementLonePair=['C'],   decrementLonePair=['C'])
atomTypes['Cs'  ].setActions(incrementBond=['Cd','CO','Cs'], decrementBond=[],            formBond=['Cs'],        breakBond=['Cs'],        incrementRadical=['Cs'],   decrementRadical=['Cs'],   incrementLonePair=['Cs'],  decrementLonePair=['Cs'])
atomTypes['Cd'  ].setActions(incrementBond=['Cdd','Ct'],     decrementBond=['Cs'],        formBond=['Cd'],        breakBond=['Cd'],        incrementRadical=['Cd'],   decrementRadical=['Cd'],   incrementLonePair=['Cd'],  decrementLonePair=['Cd'])
atomTypes['Cdd' ].setActions(incrementBond=[],               decrementBond=['Cd','CO','CS'], formBond=[],         breakBond=[],            incrementRadical=[],       decrementRadical=[],       incrementLonePair=[],      decrementLonePair=[])
atomTypes['Ct'  ].setActions(incrementBond=[],               decrementBond=['Cd'],        formBond=['Ct'],        breakBond=['Ct'],        incrementRadical=['Ct'],   decrementRadical=['Ct'],   incrementLonePair=['Ct'],  decrementLonePair=['Ct'])
atomTypes['CO'  ].setActions(incrementBond=['Cdd'],          decrementBond=['Cs'],        formBond=['CO'],        breakBond=['CO'],        incrementRadical=['CO'],   decrementRadical=['CO'],   incrementLonePair=['CO'],  decrementLonePair=['CO'])
atomTypes['CS'  ].setActions(incrementBond=['Cdd'],          decrementBond=['Cs'],        formBond=['CS'],        breakBond=['CS'],        incrementRadical=['CS'],   decrementRadical=['CS'],   incrementLonePair=['CS'],  decrementLonePair=['CS'])
atomTypes['Cb'  ].setActions(incrementBond=[],               decrementBond=[],            formBond=['Cb'],        breakBond=['Cb'],        incrementRadical=['Cb'],   decrementRadical=['Cb'],   incrementLonePair=[],      decrementLonePair=[])
atomTypes['Cbf' ].setActions(incrementBond=[],               decrementBond=[],            formBond=[],            breakBond=[],            incrementRadical=[],       decrementRadical=[],       incrementLonePair=[],      decrementLonePair=[])
atomTypes['N'   ].setActions(incrementBond=['N'],            decrementBond=['N'],         formBond=['N'],         breakBond=['N'],         incrementRadical=['N'],    decrementRadical=['N'],    incrementLonePair=['N'],   decrementLonePair=['N'])
atomTypes['N1d' ].setActions(incrementBond=[],               decrementBond=['N3s'],       formBond=[],            breakBond=['N3s'],       incrementRadical=['N1d'],  decrementRadical=['N1d'],  incrementLonePair=['N1d'], decrementLonePair=['N1d'])
atomTypes['N3s' ].setActions(incrementBond=['N3d','N3s'],    decrementBond=[],            formBond=['N3s','N5s'], breakBond=['N3s'],       incrementRadical=['N3s'],  decrementRadical=['N3s'],  incrementLonePair=['N3s'], decrementLonePair=['N3s'])
atomTypes['N3d' ].setActions(incrementBond=['N3t'],          decrementBond=['N3s'],       formBond=['N3d','N5d'], breakBond=['N3d'],       incrementRadical=['N3d'],  decrementRadical=['N3d'],  incrementLonePair=['N3d'], decrementLonePair=['N3d'])
atomTypes['N3t' ].setActions(incrementBond=[],               decrementBond=['N3d'],       formBond=['N5t'],       breakBond=[],            incrementRadical=['N3t'],  decrementRadical=['N3t'],  incrementLonePair=['N3t'], decrementLonePair=['N3t'])
atomTypes['N3b' ].setActions(incrementBond=[],               decrementBond=[],            formBond=['N3b'],       breakBond=['N3b'],       incrementRadical=['N3b'],  decrementRadical=['N3b'],  incrementLonePair=['N3b'], decrementLonePair=['N3b'])
atomTypes['N5s' ].setActions(incrementBond=['N5d'],          decrementBond=['N3s'],       formBond=['N5s'],       breakBond=['N3s'],       incrementRadical=['N5s'],  decrementRadical=['N5s'],  incrementLonePair=['N5s'], decrementLonePair=['N5s'])
atomTypes['N5d' ].setActions(incrementBond=['N5dd','N5t'],   decrementBond=['N5s'],       formBond=[],            breakBond=['N3d'],       incrementRadical=['N5d'],  decrementRadical=['N5d'],  incrementLonePair=['N5d'], decrementLonePair=['N5d'])
atomTypes['N5dd'].setActions(incrementBond=[],               decrementBond=['N3d'],       formBond=[],            breakBond=[],            incrementRadical=['N5dd'], decrementRadical=['N5dd'], incrementLonePair=['N5d'], decrementLonePair=['N5d'])
atomTypes['N5t' ].setActions(incrementBond=[],               decrementBond=['N3d','N3t'], formBond=[],            breakBond=['N3d','N3t'], incrementRadical=['N5t'],  decrementRadical=['N5t'],  incrementLonePair=['N5t'], decrementLonePair=['N5t'])
atomTypes['N5b' ].setActions(incrementBond=[],               decrementBond=[],            formBond=['N5b'],       breakBond=['N5b'],       incrementRadical=['N5b'],  decrementRadical=['N5b'],  incrementLonePair=['N5b'], decrementLonePair=['N5b'])
atomTypes['O'   ].setActions(incrementBond=['O'],            decrementBond=['O'],         formBond=['O'],         breakBond=['O'],         incrementRadical=['O'],    decrementRadical=['O'],    incrementLonePair=['Os'],  decrementLonePair=['Os'])
atomTypes['Os'  ].setActions(incrementBond=['Od'],           decrementBond=[],            formBond=['Os'],        breakBond=['Os'],        incrementRadical=['Os'],   decrementRadical=['Os'],   incrementLonePair=['Os'],  decrementLonePair=['Os'])
atomTypes['Od'  ].setActions(incrementBond=[],               decrementBond=['Os'],        formBond=[],            breakBond=[],            incrementRadical=[],       decrementRadical=[],       incrementLonePair=['Od'],  decrementLonePair=['Od'])
atomTypes['Oa'  ].setActions(incrementBond=[],               decrementBond=[],            formBond=[],            breakBond=[],            incrementRadical=[],       decrementRadical=[],       incrementLonePair=[],      decrementLonePair=[])
atomTypes['Ot'  ].setActions(incrementBond=[],               decrementBond=['Od'],        formBond=[],            breakBond=[],            incrementRadical=[],       decrementRadical=[],       incrementLonePair=['Ot'],  decrementLonePair=['Ot'])
atomTypes['Ne'  ].setActions(incrementBond=[],               decrementBond=[],            formBond=[],            breakBond=[],            incrementRadical=['Ne'],   decrementRadical=['Ne'],   incrementLonePair=[],      decrementLonePair=[])
atomTypes['Si'  ].setActions(incrementBond=['Si'],           decrementBond=['Si'],        formBond=['Si'],        breakBond=['Si'],        incrementRadical=['Si'],   decrementRadical=['Si'],   incrementLonePair=[],      decrementLonePair=[])
atomTypes['Sis' ].setActions(incrementBond=['Sid','SiO'],    decrementBond=[],            formBond=['Sis'],       breakBond=['Sis'],       incrementRadical=['Sis'],  decrementRadical=['Sis'],  incrementLonePair=[],      decrementLonePair=[])
atomTypes['Sid' ].setActions(incrementBond=['Sidd','Sit'],   decrementBond=['Sis'],       formBond=['Sid'],       breakBond=['Sid'],       incrementRadical=['Sid'],  decrementRadical=['Sid'],  incrementLonePair=[],      decrementLonePair=[])
atomTypes['Sidd'].setActions(incrementBond=[],               decrementBond=['Sid','SiO'], formBond=[],            breakBond=[],            incrementRadical=[],       decrementRadical=[],       incrementLonePair=[],      decrementLonePair=[])
atomTypes['Sit' ].setActions(incrementBond=[],               decrementBond=['Sid'],       formBond=['Sit'],       breakBond=['Sit'],       incrementRadical=['Sit'],  decrementRadical=['Sit'],  incrementLonePair=[],      decrementLonePair=[])
atomTypes['SiO' ].setActions(incrementBond=['Sidd'],         decrementBond=['Sis'],       formBond=['SiO'],       breakBond=['SiO'],       incrementRadical=['SiO'],  decrementRadical=['SiO'],  incrementLonePair=[],      decrementLonePair=[])
atomTypes['Sib' ].setActions(incrementBond=[],               decrementBond=[],            formBond=['Sib'],       breakBond=['Sib'],       incrementRadical=['Sib'],  decrementRadical=['Sib'],  incrementLonePair=[],      decrementLonePair=[])
atomTypes['Sibf'].setActions(incrementBond=[],               decrementBond=[],            formBond=[],            breakBond=[],            incrementRadical=[],       decrementRadical=[],       incrementLonePair=[],      decrementLonePair=[])
atomTypes['S'   ].setActions(incrementBond=['S'],            decrementBond=['S'],         formBond=['S'],         breakBond=['S'],         incrementRadical=['S'],    decrementRadical=['S'],    incrementLonePair=['S'],   decrementLonePair=['S'])
atomTypes['Ss'  ].setActions(incrementBond=['Sd'],           decrementBond=[],            formBond=['Ss'],        breakBond=['Ss'],        incrementRadical=['Ss'],   decrementRadical=['Ss'],   incrementLonePair=['Ss'],  decrementLonePair=['Ss'])
atomTypes['Sd'  ].setActions(incrementBond=[],               decrementBond=['Ss'],        formBond=[],            breakBond=[],            incrementRadical=[],       decrementRadical=[],       incrementLonePair=['Sd'],  decrementLonePair=['Sd'])
atomTypes['Sa'  ].setActions(incrementBond=[],               decrementBond=[],            formBond=[],            breakBond=[],            incrementRadical=[],       decrementRadical=[],       incrementLonePair=[],      decrementLonePair=[])
atomTypes['Cl'  ].setActions(incrementBond=[],               decrementBond=['Cl'],        formBond=['Cl'],        breakBond=['Cl'],        incrementRadical=['Cl'],   decrementRadical=['Cl'],   incrementLonePair=[],      decrementLonePair=[])
atomTypes['Ar'  ].setActions(incrementBond=[],               decrementBond=[],            formBond=[],            breakBond=[],            incrementRadical=[],       decrementRadical=[],       incrementLonePair=[],      decrementLonePair=[])
# Resolve the string labels stored in each atom type's hierarchy and action
# lists into references to the corresponding AtomType objects.
for _atom_type in atomTypes.values():
    for _labels in (_atom_type.generic, _atom_type.specific,
                    _atom_type.incrementBond, _atom_type.decrementBond,
                    _atom_type.formBond, _atom_type.breakBond,
                    _atom_type.incrementRadical, _atom_type.decrementRadical,
                    _atom_type.incrementLonePair, _atom_type.decrementLonePair):
        _labels[:] = [atomTypes[_label] for _label in _labels]
def getAtomType(atom, bonds):
    """
    Determine the appropriate atom type for an :class:`Atom` object `atom`
    with local bond structure `bonds`, a ``dict`` containing atom-bond pairs.

    Raises :class:`AtomTypeError` if the element/bond-count combination does
    not match any known atom type.
    """
    cython.declare(atomType=str, atomSymbol=str)
    cython.declare(single=cython.int, double=cython.int, doubleR=cython.int,
                   doubleS=cython.int, doubleO=cython.int, triple=cython.int,
                   benzene=cython.int)

    atomType = ''

    # Count numbers of each higher-order bond type
    single = 0; doubleR = 0; doubleO = 0; doubleS = 0; triple = 0; benzene = 0
    for atom2, bond12 in bonds.iteritems():
        if bond12.isSingle():
            single += 1
        elif bond12.isDouble():
            if atom2.isOxygen():
                doubleO += 1
            elif atom2.isSulfur():
                doubleS += 1
            else:
                # doubleR is for double bonds NOT to oxygen or sulfur
                doubleR += 1
        elif bond12.isTriple(): triple += 1
        elif bond12.isBenzene(): benzene += 1
    # double is for all double bonds, to anything
    double = doubleR + doubleO + doubleS

    # Use element and counts to determine proper atom type
    atomSymbol = atom.symbol
    if atomSymbol == 'H':
        atomType = 'H'
    elif atomSymbol == 'He':
        atomType = 'He'
    elif atomSymbol == 'C':
        if   double == 0 and triple == 0 and benzene == 0: atomType = 'Cs'
        elif double == 1 and triple == 0 and benzene == 0 and doubleR == 1: atomType = 'Cd'
        elif double == 2 and triple == 0 and benzene == 0: atomType = 'Cdd'
        elif double == 0 and triple == 1 and benzene == 0: atomType = 'Ct'
        elif double == 1 and triple == 0 and benzene == 0 and doubleO == 1: atomType = 'CO'
        elif double == 1 and triple == 0 and benzene == 0 and doubleS == 1: atomType = 'CS'
        elif double == 0 and triple == 0 and benzene == 2: atomType = 'Cb'
        elif double == 0 and triple == 0 and benzene == 3: atomType = 'Cbf'
    elif atomSymbol == 'N':
        if   double == 0 and triple == 0 and benzene == 0 and single in [0, 1, 2, 3]: atomType = 'N3s'
        elif double == 1 and triple == 0 and benzene == 0 and single == 0 and doubleR == 1 and atom.lonePairs == 2: atomType = 'N1d'
        elif double == 1 and triple == 0 and benzene == 0 and single in [0, 1]: atomType = 'N3d'
        elif double == 0 and triple == 1 and benzene == 0 and single == 0: atomType = 'N3t'
        elif double == 0 and triple == 0 and benzene == 2 and single == 0: atomType = 'N3b'
        elif double == 0 and triple == 0 and benzene == 0 and single == 4: atomType = 'N5s'
        elif double == 1 and triple == 0 and benzene == 0 and single == 2: atomType = 'N5d'
        elif double == 2 and triple == 0 and benzene == 0 and single == 0: atomType = 'N5dd'
        elif double == 0 and triple == 1 and benzene == 0 and single == 1: atomType = 'N5t'
        elif double == 0 and triple == 0 and benzene == 2 and single == 1: atomType = 'N5b'
    elif atomSymbol == 'O':
        # The atomic (bond-free) case must be tested first: the 'Os'
        # condition below also matches an atom with zero bonds, which
        # previously made the 'Oa' branch unreachable.
        if   len(bonds) == 0: atomType = 'Oa'
        elif double == 0 and triple == 0 and benzene == 0: atomType = 'Os'
        elif double == 1 and triple == 0 and benzene == 0: atomType = 'Od'
        elif double == 0 and triple == 1 and benzene == 0: atomType = 'Ot'
    elif atomSymbol == 'Ne':
        atomType = 'Ne'
    elif atomSymbol == 'Si':
        if   double == 0 and triple == 0 and benzene == 0: atomType = 'Sis'
        elif double == 1 and triple == 0 and benzene == 0 and doubleO == 1: atomType = 'SiO'
        elif double == 1 and triple == 0 and benzene == 0: atomType = 'Sid'
        elif double == 2 and triple == 0 and benzene == 0: atomType = 'Sidd'
        elif double == 0 and triple == 1 and benzene == 0: atomType = 'Sit'
        elif double == 0 and triple == 0 and benzene == 2: atomType = 'Sib'
        elif double == 0 and triple == 0 and benzene == 3: atomType = 'Sibf'
    elif atomSymbol == 'S':
        # As with oxygen, test the atomic case before 'Ss', which would
        # otherwise shadow the 'Sa' branch.
        if   len(bonds) == 0: atomType = 'Sa'
        elif double == 0 and triple == 0 and benzene == 0: atomType = 'Ss'
        elif double == 1 and triple == 0 and benzene == 0: atomType = 'Sd'
    elif atomSymbol == 'Cl':
        atomType = 'Cl'
    elif atomSymbol == 'Ar':
        atomType = 'Ar'

    # Raise exception if we could not identify the proper atom type
    if atomType == '':
        raise AtomTypeError('Unable to determine atom type for atom {0}, which has {1:d} double bonds to C, {2:d} double bonds to O, {3:d} double bonds to S, {4:d} triple bonds, and {5:d} benzene bonds.'.format(atom, doubleR, doubleO, doubleS, triple, benzene))

    return atomTypes[atomType]
| {
"content_hash": "05bd6b6573ed11a219bf00922f6b9dea",
"timestamp": "",
"source": "github",
"line_count": 381,
"max_line_length": 261,
"avg_line_length": 73.83727034120734,
"alnum_prop": 0.5713422437082326,
"repo_name": "pierrelb/RMG-Py",
"id": "8a215e15bcfe8403dd32a2d088c6e4b8ec670e30",
"size": "29530",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "rmgpy/molecule/atomtype.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "461"
},
{
"name": "Jupyter Notebook",
"bytes": "32950"
},
{
"name": "Makefile",
"bytes": "5832"
},
{
"name": "Python",
"bytes": "3507378"
},
{
"name": "Shell",
"bytes": "2733"
}
],
"symlink_target": ""
} |
import Queue
import sqlite3
import threading
class Db(object):
    """Asynchronous wrapper around a sqlite3 `records` table.

    All operations are queued as callables and executed on a single worker
    thread (sqlite3 connections must only be used from the thread that
    created them). Completion is signalled by triggering events on the
    supplied injector; write operations defer their events until the batch
    they belong to has been committed.
    """

    def __init__(self, db, injector):
        # db: path of the sqlite database file; injector: event sink with
        # trigger()/close() used to signal completion back to the caller.
        self.db = db
        self.injector = injector
        # Queue of callables, each taking the worker's connection.
        self.tasks = Queue.Queue()
        # Id of the last record handed out by load(); None = start over.
        self.position = None
        # Events deferred until the next commit in _process().
        self.pending_events = []
        self.running = True
        self.thread = threading.Thread(target=self._process)
        self.thread.daemon=True
        self.thread.start()

    def close(self):
        """Queue a request to stop the worker thread."""
        self.tasks.put(lambda conn: self._close())

    def reset(self):
        """Queue a reset of the load() cursor position."""
        self.tasks.put(lambda conn: self._reset())

    def load(self, records, event=None):
        """Queue a fetch of records after the current position into the
        `records` queue; trigger `event` (if given) when done."""
        self.tasks.put(lambda conn: self._load(conn, records, event))

    def get_id(self, event):
        """Queue a lookup of the highest record id; the result is stored
        on `event.id` before the event is triggered."""
        self.tasks.put(lambda conn: self._get_id(conn, event))

    def insert(self, id, data, event=None):
        """Queue an insert of `data`, with an explicit `id` when truthy."""
        self.tasks.put(lambda conn: self._insert(conn, id, data, event))

    def delete(self, id, event=None):
        """Queue a delete of the record with the given `id`."""
        self.tasks.put(lambda conn: self._delete(conn, id, event))

    # The _-prefixed methods below run on the worker thread only.

    def _reset(self, ignored=None):
        self.position = None

    def _close(self, ignored=None):
        self.running = False

    def _get_id(self, conn, event):
        # Highest id first, so fetchone() yields the newest record.
        cursor = conn.execute("SELECT * FROM records ORDER BY id DESC")
        row = cursor.fetchone()
        if event:
            if row:
                event.id = row['id']
            else:
                event.id = 0  # empty table
            self.injector.trigger(event)

    def _load(self, conn, records, event):
        # Resume after the last record delivered, or from the beginning.
        if self.position:
            cursor = conn.execute("SELECT * FROM records WHERE id > ? ORDER BY id", (self.position,))
        else:
            cursor = conn.execute("SELECT * FROM records ORDER BY id")
        # Fill the caller's queue until it is full or rows run out.
        while not records.full():
            row = cursor.fetchone()
            if row:
                self.position = row['id']  # remember progress for next load()
                records.put(dict(row))
            else:
                break
        if event:
            self.injector.trigger(event)

    def _insert(self, conn, id, data, event):
        if id:
            conn.execute("INSERT INTO records(id, description) VALUES (?, ?)", (id, data))
        else:
            conn.execute("INSERT INTO records(description) VALUES (?)", (data,))
        if event:
            # Deferred: triggered after the surrounding batch is committed.
            self.pending_events.append(event)

    def _delete(self, conn, id, event):
        conn.execute("DELETE FROM records WHERE id=?", (id,))
        if event:
            # Deferred: triggered after the surrounding batch is committed.
            self.pending_events.append(event)

    def _process(self):
        """Worker-thread main loop: block for a task, drain everything else
        queued behind it, commit once per batch, then fire deferred events."""
        conn = sqlite3.connect(self.db)
        conn.row_factory = sqlite3.Row
        with conn:
            while self.running:
                f = self.tasks.get(True)  # block until a task arrives
                try:
                    while True:
                        f(conn)
                        f = self.tasks.get(False)  # drain without blocking
                except Queue.Empty: pass
                conn.commit()
                for event in self.pending_events:
                    self.injector.trigger(event)
                self.pending_events = []
        self.injector.close()
| {
"content_hash": "0ad1c5fb91885715ad27ee62d73d68dd",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 101,
"avg_line_length": 31.819148936170212,
"alnum_prop": 0.5392845202273487,
"repo_name": "jeckersb/Proton",
"id": "85f8191ee7e1c6524669f19058355e3acf2c0b89",
"size": "3803",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/python/db_common.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1207980"
},
{
"name": "C++",
"bytes": "21239"
},
{
"name": "CMake",
"bytes": "73662"
},
{
"name": "Go",
"bytes": "42743"
},
{
"name": "Groff",
"bytes": "420"
},
{
"name": "HTML",
"bytes": "11875"
},
{
"name": "Java",
"bytes": "1660338"
},
{
"name": "JavaScript",
"bytes": "333628"
},
{
"name": "Makefile",
"bytes": "5613"
},
{
"name": "PHP",
"bytes": "33476"
},
{
"name": "Perl",
"bytes": "119035"
},
{
"name": "Perl6",
"bytes": "878"
},
{
"name": "Python",
"bytes": "656950"
},
{
"name": "Ruby",
"bytes": "169915"
},
{
"name": "Shell",
"bytes": "14901"
}
],
"symlink_target": ""
} |
from utils.munin.base import MuninGraph
class NBMuninGraph(MuninGraph):
    """Munin plugin reporting average feed-fetch time per task server."""

    @property
    def graph_config(self):
        """Build the Munin graph configuration, one data series per server."""
        config = {
            'graph_category' : 'NewsBlur',
            'graph_title' : 'NewsBlur Task Server Times',
            'graph_vlabel' : 'Feed fetch time / server',
            'graph_args' : '-l 0',
        }
        server_stats = self.stats
        config['graph_order'] = ' '.join(sorted(s['_id'] for s in server_stats))
        config.update({"%s.label" % s['_id']: s['_id'] for s in server_stats})
        config.update({"%s.draw" % s['_id']: 'LINE1' for s in server_stats})
        return config

    def calculate_metrics(self):
        """Map each server name to its average total fetch time."""
        return {"%s" % s['_id']: s['total'] for s in self.stats}

    @property
    def stats(self):
        """Aggregate per-server average fetch time over the last 5 minutes."""
        import datetime
        from django.conf import settings
        match_stage = {
            "$match": {
                "date": {
                    "$gt": datetime.datetime.now() - datetime.timedelta(minutes=5),
                },
            },
        }
        group_stage = {
            "$group": {
                "_id": "$server",
                "total": {"$avg": "$total"},
            },
        }
        aggregation = settings.MONGOANALYTICSDB.nbanalytics.feed_fetches.aggregate(
            [match_stage, group_stage])
        return aggregation['result']

if __name__ == '__main__':
    NBMuninGraph().run()
| {
"content_hash": "e67ca752cbf5e285f4c3012bf7c7c6a0",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 83,
"avg_line_length": 28.354166666666668,
"alnum_prop": 0.4739162380602498,
"repo_name": "bruceyou/NewsBlur",
"id": "fed10164b37c0320a7220746a05645da01dde8a6",
"size": "1384",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "utils/munin/newsblur_tasks_times.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "14187"
},
{
"name": "C++",
"bytes": "8218"
},
{
"name": "CSS",
"bytes": "658348"
},
{
"name": "CoffeeScript",
"bytes": "6400"
},
{
"name": "Java",
"bytes": "574069"
},
{
"name": "JavaScript",
"bytes": "1495762"
},
{
"name": "Objective-C",
"bytes": "3672639"
},
{
"name": "Perl",
"bytes": "55612"
},
{
"name": "Python",
"bytes": "2321299"
},
{
"name": "R",
"bytes": "527"
},
{
"name": "Ruby",
"bytes": "870"
},
{
"name": "Shell",
"bytes": "10181"
}
],
"symlink_target": ""
} |
import multiprocessing
from time import time, sleep
import numpy as np
from rtgraph.core.constants import Constants
from rtgraph.common.logger import Logger as Log
TAG = "Simulator"
class SimulatorProcess(multiprocessing.Process):
    """
    Produces a sine/cosine signal and feeds it, CSV-encoded, to a parser
    process as if it were raw serial data.
    """
    def __init__(self, parser_process):
        """
        Initialises values for process.
        :param parser_process: Reference to a ParserProcess instance.
        :type parser_process: ParserProcess.
        """
        multiprocessing.Process.__init__(self)
        self._exit = multiprocessing.Event()
        self._period = None
        self._parser = parser_process
        Log.i(TAG, "Process Ready")

    def open(self, port=None, speed=Constants.simulator_default_speed, timeout=0.5):
        """
        Opens a specified serial port.
        :param port: Not used.
        :type port: str.
        :param speed: Period of the generated signal.
        :type speed: float.
        :param timeout: Not used.
        :type timeout: float.
        :return: True if the port is available.
        :rtype: bool.
        """
        self._period = float(speed)
        Log.i(TAG, "Using sample rate at {}".format(self._period))
        return True

    def run(self):
        """
        Emits one sin/cos sample per period until stop() is requested.
        """
        Log.i(TAG, "Process starting...")
        started = time()
        omega = 2 * np.pi
        while not self._exit.is_set():
            elapsed = time() - started
            csv_line = "{},{}\r\n".format(np.sin(omega * elapsed),
                                          np.cos(omega * elapsed))
            self._parser.add([elapsed, csv_line.encode(Constants.app_encoding)])
            sleep(self._period)
        Log.i(TAG, "Process finished")

    def stop(self):
        """
        Signals the process to stop acquiring data.
        :return:
        """
        Log.i(TAG, "Process finishing...")
        self._exit.set()

    @staticmethod
    def get_ports():
        """
        Gets a list of the available ports.
        :return: List of available ports.
        :rtype: str list.
        """
        return ["Sine Simulator"]

    @staticmethod
    def get_speeds():
        """
        Gets a list of the speeds.
        :return: List of the speeds.
        :rtype: str list.
        """
        rates = [0.002, 0.004, 0.005, 0.010, 0.020, 0.050, 0.100, 0.250]
        return [str(rate) for rate in rates]
| {
"content_hash": "aec6fa2b54a1f772bf01477f014f8cab",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 106,
"avg_line_length": 29.142857142857142,
"alnum_prop": 0.5612745098039216,
"repo_name": "ssepulveda/RTGraph",
"id": "4e1abdbf54dc4e0ceb93bbb6fbe5d355a2e78f9e",
"size": "2448",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rtgraph/processors/Simulator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "777"
},
{
"name": "Python",
"bytes": "56288"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
from pants.util.memo import memoized, memoized_property, per_instance, testable_memoized_property
class MemoizeTest(unittest.TestCase):
  """Tests for ``@memoized`` and friends from ``pants.util.memo``."""

  def test_function_application_positional(self):
    """Results are cached per distinct positional-argument tuple."""
    calculations = []
    @memoized
    def multiply(first, second):
      calculations.append((first, second))
      return first * second
    self.assertEqual(6, multiply(2, 3))
    self.assertEqual(6, multiply(3, 2))
    self.assertEqual(6, multiply(2, 3))
    # (2, 3) and (3, 2) are distinct keys; the third call was a cache hit.
    self.assertEqual([(2, 3), (3, 2)], calculations)

  def test_function_application_kwargs(self):
    """Keyword calls hit the same cache entry regardless of argument order."""
    calculations = []
    @memoized()
    def multiply(first, second):
      calculations.append((first, second))
      return first * second
    self.assertEqual(6, multiply(first=2, second=3))
    self.assertEqual(6, multiply(second=3, first=2))
    self.assertEqual(6, multiply(first=2, second=3))
    self.assertEqual([(2, 3)], calculations)

  def test_function_application_mixed(self):
    """Mixed positional/keyword calls are keyed by their canonical form."""
    calculations = []
    @memoized
    def func(*args, **kwargs):
      calculations.append((args, kwargs))
      return args, kwargs
    self.assertEqual((('a',), {'fred': 42, 'jane': True}), func('a', fred=42, jane=True))
    self.assertEqual((('a', 42), {'jane': True}), func('a', 42, jane=True))
    self.assertEqual((('a',), {'fred': 42, 'jane': True}), func('a', jane=True, fred=42))
    self.assertEqual([(('a',), {'fred': 42, 'jane': True}),
                      (('a', 42), {'jane': True})], calculations)

  def test_function_application_potentially_ambiguous_parameters(self):
    """A positional tuple argument and a same-named kwarg are distinct keys."""
    calculations = []
    @memoized
    def func(*args, **kwargs):
      calculations.append((args, kwargs))
      return args, kwargs
    self.assertEqual(((('a', 42),), {}), func(('a', 42)))
    self.assertEqual(((), {'a': 42}), func(a=42))
    self.assertEqual([((('a', 42),), {}),
                      ((), {'a': 42})], calculations)

  def test_key_factory(self):
    """A custom key_factory controls which calls share a cache entry."""
    def create_key(num):
      return num % 2
    calculations = []
    @memoized(key_factory=create_key)
    def square(num):
      calculations.append(num)
      return num * num
    # All even inputs share key 0 and all odd inputs share key 1, so only
    # the first even (2) and first odd (3) calls actually compute.
    self.assertEqual(4, square(2))
    self.assertEqual(9, square(3))
    self.assertEqual(4, square(4))
    self.assertEqual(9, square(5))
    self.assertEqual(4, square(8))
    self.assertEqual(9, square(7))
    self.assertEqual([2, 3], calculations)

  def test_cache_factory(self):
    """A custom cache_factory supplies the backing mapping."""
    class SingleEntryMap(dict):
      # Evicts everything on each store, so only the latest entry survives.
      def __setitem__(self, key, value):
        self.clear()
        return super(SingleEntryMap, self).__setitem__(key, value)
    calculations = []
    @memoized(cache_factory=SingleEntryMap)
    def square(num):
      calculations.append(num)
      return num * num
    self.assertEqual(4, square(2))
    self.assertEqual(4, square(2))
    self.assertEqual(9, square(3))
    self.assertEqual(9, square(3))
    # Caching square(3) evicted square(2), forcing a re-compute of 2.
    self.assertEqual(4, square(2))
    self.assertEqual(4, square(2))
    self.assertEqual([2, 3, 2], calculations)

  def test_forget(self):
    """forget() evicts a single key; other entries stay cached."""
    calculations = []
    @memoized
    def square(num):
      calculations.append(num)
      return num * num
    self.assertEqual(4, square(2))
    self.assertEqual(4, square(2))
    self.assertEqual(9, square(3))
    self.assertEqual(9, square(3))
    square.forget(2)
    self.assertEqual(4, square(2))
    self.assertEqual(4, square(2))
    self.assertEqual(9, square(3))
    self.assertEqual(9, square(3))
    self.assertEqual([2, 3, 2], calculations)

  def test_clear(self):
    """clear() evicts every cached entry."""
    calculations = []
    @memoized
    def square(num):
      calculations.append(num)
      return num * num
    self.assertEqual(4, square(2))
    self.assertEqual(4, square(2))
    self.assertEqual(9, square(3))
    self.assertEqual(9, square(3))
    square.clear()
    self.assertEqual(4, square(2))
    self.assertEqual(4, square(2))
    self.assertEqual(9, square(3))
    self.assertEqual(9, square(3))
    self.assertEqual([2, 3, 2, 3], calculations)

  class _Called(object):
    # Helper recording a weighted count of how many times _called() ran.
    def __init__(self, increment):
      self._calls = 0
      self._increment = increment
    def _called(self):
      self._calls += self._increment
      return self._calls

  def test_instancemethod_application_id_eq(self):
    """Instances that are not equal each get their own cache."""
    class Foo(self._Called):
      @memoized
      def calls(self):
        return self._called()
    foo1 = Foo(1)
    foo2 = Foo(2)
    # Different (`!=`) Foo instances have their own cache:
    self.assertEqual(1, foo1.calls())
    self.assertEqual(1, foo1.calls())
    self.assertEqual(2, foo2.calls())
    self.assertEqual(2, foo2.calls())

  def test_instancemethod_application_degenerate_eq(self):
    """per_instance keys by identity even when instances compare equal."""
    class Foo(self._Called):
      @memoized
      def calls_per_eq(self):
        return self._called()
      @memoized(key_factory=per_instance)
      def calls_per_instance(self):
        return self._called()
      def __hash__(self):
        return hash(type)
      def __eq__(self, other):
        return type(self) == type(other)
    foo1 = Foo(3)
    foo2 = Foo(4)
    # Here foo1 and foo2 share a cache since they are `==` which is likely surprising behavior:
    self.assertEqual(3, foo1.calls_per_eq())
    self.assertEqual(3, foo1.calls_per_eq())
    self.assertEqual(3, foo2.calls_per_eq())
    self.assertEqual(3, foo2.calls_per_eq())
    # Here the cache is split between the instances which is likely the expected behavior:
    self.assertEqual(6, foo1.calls_per_instance())
    self.assertEqual(6, foo1.calls_per_instance())
    self.assertEqual(4, foo2.calls_per_instance())
    self.assertEqual(4, foo2.calls_per_instance())

  def test_descriptor_application_invalid(self):
    """Decorating a descriptor (e.g. a property) with @memoized raises."""
    with self.assertRaises(ValueError):
      # Can't decorate a descriptor
      class Foo(object):
        @memoized
        @property
        def name(self):
          pass

  def test_descriptor_application_valid(self):
    """@property over @memoized caches the underlying method's result."""
    class Foo(self._Called):
      @property
      @memoized
      def calls(self):
        return self._called()
    foo1 = Foo(1)
    self.assertEqual(1, foo1.calls)
    self.assertEqual(1, foo1.calls)
    foo2 = Foo(2)
    self.assertEqual(2, foo2.calls)
    self.assertEqual(2, foo2.calls)

  def test_memoized_property(self):
    """memoized_property caches per instance and rejects assignment."""
    class Foo(self._Called):
      @memoized_property
      def calls(self):
        return self._called()
    foo1 = Foo(1)
    self.assertEqual(1, foo1.calls)
    self.assertEqual(1, foo1.calls)
    foo2 = Foo(2)
    self.assertEqual(2, foo2.calls)
    self.assertEqual(2, foo2.calls)
    with self.assertRaises(AttributeError):
      foo2.calls = None

  def test_mutable_memoized_property(self):
    """testable_memoized_property additionally allows assignment."""
    class Foo(self._Called):
      @testable_memoized_property
      def calls(self):
        return self._called()
    foo1 = Foo(1)
    self.assertEqual(1, foo1.calls)
    self.assertEqual(1, foo1.calls)
    foo2 = Foo(2)
    self.assertEqual(2, foo2.calls)
    self.assertEqual(2, foo2.calls)
    foo2.calls = None
    self.assertIsNone(foo2.calls)

  def test_memoized_property_forget(self):
    """``del`` on a memoized_property evicts only that instance's value."""
    class Foo(self._Called):
      @memoized_property
      def calls(self):
        return self._called()
    foo1 = Foo(1)
    # Forgetting before caching should be a harmless noop
    del foo1.calls
    self.assertEqual(1, foo1.calls)
    self.assertEqual(1, foo1.calls)
    foo2 = Foo(2)
    self.assertEqual(2, foo2.calls)
    self.assertEqual(2, foo2.calls)
    # Now un-cache foo2's calls result and observe no effect on foo1.calls, but a re-compute for
    # foo2.calls
    del foo2.calls
    self.assertEqual(1, foo1.calls)
    self.assertEqual(1, foo1.calls)
    self.assertEqual(4, foo2.calls)
    self.assertEqual(4, foo2.calls)
| {
"content_hash": "619c4b395e90eafa57bf28b1d7ae395a",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 97,
"avg_line_length": 26.322033898305083,
"alnum_prop": 0.6309079201545396,
"repo_name": "ity/pants",
"id": "855b94ad846f0dfb155940a55f9a883ec0f0a01e",
"size": "7912",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/util/test_memo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "1526"
},
{
"name": "HTML",
"bytes": "75140"
},
{
"name": "Java",
"bytes": "402667"
},
{
"name": "JavaScript",
"bytes": "29992"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "4960888"
},
{
"name": "Scala",
"bytes": "85556"
},
{
"name": "Shell",
"bytes": "58420"
},
{
"name": "Thrift",
"bytes": "2919"
}
],
"symlink_target": ""
} |
import sys
import unittest
from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1.codec.der.encoder import encode as der_encoder
from pyasn1_modules import pem
from pyasn1_modules import rfc5280
from pyasn1_modules import rfc6010
class UnconstrainedCCCExtensionTestCase(unittest.TestCase):
    """Round-trip a CMS content constraints extension with one unconstrained entry."""
    unconstrained_pem_text = "MB0GCCsGAQUFBwESBBEwDzANBgsqhkiG9w0BCRABAA=="

    def setUp(self):
        self.asn1Spec = rfc5280.Extension()

    def testDerCodec(self):
        raw = pem.readBase64fromText(self.unconstrained_pem_text)
        extension, trailing = der_decoder(raw, asn1Spec=self.asn1Spec)
        self.assertFalse(trailing)
        self.assertTrue(extension.prettyPrint())
        # Re-encoding must reproduce the original DER substrate exactly.
        self.assertEqual(raw, der_encoder(extension))
        self.assertEqual(
            rfc6010.id_pe_cmsContentConstraints, extension['extnID'])
        constraints, trailing = der_decoder(
            extension['extnValue'],
            asn1Spec=rfc6010.CMSContentConstraints())
        self.assertFalse(trailing)
        self.assertTrue(constraints.prettyPrint())
        self.assertEqual(extension['extnValue'], der_encoder(constraints))
        self.assertEqual(
            rfc6010.id_ct_anyContentType, constraints[0]['contentType'])
class ConstrainedCCCExtensionTestCase(unittest.TestCase):
    """Decode a CMS content constraints extension carrying four constraints."""
    # DER-encoded extension (base64): four content-type constraints, three
    # of which carry attribute constraints.
    constrained_pem_text = """\
MIG7BggrBgEFBQcBEgSBrjCBqzA0BgsqhkiG9w0BCRABEDAlMCMGCyqGSIb3DQEJ
EAwBMRQMElZpZ2lsIFNlY3VyaXR5IExMQzAwBgpghkgBZQIBAk4CMCIwIAYLKoZI
hvcNAQkQDAsxEQwPa3RhLmV4YW1wbGUuY29tMDEGCyqGSIb3DQEJEAEZMCIwIAYL
KoZIhvcNAQkQDAsxEQwPa3RhLmV4YW1wbGUuY29tMA4GCSqGSIb3DQEHAQoBAQ==
"""

    def setUp(self):
        self.asn1Spec = rfc5280.Extension()

    def testDerCodec(self):
        """Round-trip the extension and tally the embedded constraints."""
        substrate = pem.readBase64fromText(self.constrained_pem_text)
        asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        self.assertEqual(substrate, der_encoder(asn1Object))
        self.assertEqual(
            rfc6010.id_pe_cmsContentConstraints, asn1Object['extnID'])
        evalue, rest = der_decoder(
            asn1Object['extnValue'],
            asn1Spec=rfc6010.CMSContentConstraints())
        self.assertFalse(rest)
        self.assertTrue(evalue.prettyPrint())
        self.assertEqual(asn1Object['extnValue'], der_encoder(evalue))
        constraint_count = 0
        attribute_count = 0
        cannot_count = 0
        for ccc in evalue:
            constraint_count += 1
            # Per RFC 6010, ContentTypeGeneration is ENUMERATED
            # { canSource(0), cannotSource(1) } -- so a value of 1 means the
            # content type cannot be sourced, hence the counter name.
            if ccc['canSource'] == 1:
                cannot_count += 1
            if ccc['attrConstraints'].hasValue():
                for attr in ccc['attrConstraints']:
                    attribute_count += 1
        self.assertEqual(4, constraint_count)
        self.assertEqual(3, attribute_count)
        self.assertEqual(1, cannot_count)

    def testExtensionsMap(self):
        """The extension OID is registered in the shared extension map."""
        substrate = pem.readBase64fromText(self.constrained_pem_text)
        asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
        self.assertIn(asn1Object['extnID'], rfc5280.certificateExtensionsMap)
# Gather every TestCase in this module so the file can be executed directly.
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite)
| {
"content_hash": "47cfad74b21e64b2eed6ad3ac269e319",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 77,
"avg_line_length": 34.638297872340424,
"alnum_prop": 0.6937960687960688,
"repo_name": "etingof/pyasn1-modules",
"id": "1726a8d880a3cbf4527456bd8ea6704df98fcd5a",
"size": "3429",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_rfc6010.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1395999"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from src.fourier_series.fourier_series import GeneralizedFourierSeries
from src.fourier_series.legendre.polynomials import legendre_polyval
from src.fourier_series.ringbuffer import Ringbuffer
from src.fourier_series.vector import to_col
class LegendreSeries(GeneralizedFourierSeries):
    """Fourier-Legendre partial sums of a step function and a V-function.

    Calling an instance yields ``(degree, (step_partial_sum,
    v_partial_sum))`` pairs lazily, one per series degree.
    """
    # Names of the two target functions this series approximates.
    functions = ('step_function', 'v_function')
    start_index = 1
    a = 0  # Jacobi polynomial variables
    b = 0  # Jacobi polynomial variables

    def __init__(self, x, x0, degree):
        # x: evaluation grid; x0: jump/kink locations of the target functions.
        # degree: highest series degree to compute; None means unbounded.
        if degree is None:
            self.degree_leg = None
        else:
            # Extra Legendre terms are generated because the recurrences in
            # __call__ look ahead by up to two indices (s + 1 with s = k - 1).
            self.degree_leg = degree + 3
        self.x = np.array(x)
        self.x0 = np.array(x0)
        # [number of singular points, number of evaluation points]
        self.shape = [self.x0.size, self.x.size]
        super().__init__(self.functions, degree)

    def __call__(self, *args, **kwargs):
        # Rolling window of P_n(x0) coefficient rows; the recurrences below
        # reference neighbouring indices, so only three slots are needed.
        legendre_at_x0 = Ringbuffer(buffer_size=3,
                                    array_size=self.shape[0],
                                    start_index=0)
        legendre_at_x0[:] = np.zeros(self.shape[0])
        step_function_sum = np.zeros(self.shape)
        v_function_sum = np.zeros(self.shape)
        # Generators of Legendre polynomial values at x0 and x respectively.
        gen_a = legendre_polyval(self.x0, self.degree_leg, skip=2)
        gen_x = legendre_polyval(self.x, self.degree_leg, skip=2)
        gen = zip(gen_a, gen_x)
        for (k, legendre_a), (_, legendre_x) in gen:
            # Snapshot of the step-function partial sum *before* this term;
            # it is yielded together with the v-function sum below.
            step_function = np.array(step_function_sum)
            ''' Step Function '''
            s = k - 1
            # NOTE(review): P_{s-1}(x0) - P_{s+1}(x0) appears to be the
            # series coefficient for the step function -- confirm against
            # the project's derivation.
            temp1 = legendre_a[s - 1, :] - legendre_a[s + 1, :]
            legendre_at_x0[:] = temp1
            temp3 = legendre_x[s, :] * to_col(legendre_at_x0[s])
            step_function_sum += temp3
            ''' V Function '''
            v = k - 2
            if v == 0:
                # Degree-0 term has no (v - 1) neighbour in the recurrence.
                temp4 = legendre_at_x0[v + 1] / (-(2 * v + 3))
                temp4 = to_col(temp4)
                temp4 = legendre_x[v, :] * temp4
                v_function_sum += temp4
            elif v >= 1:
                temp4 = legendre_at_x0[v - 1] / (2 * v - 1) + \
                        legendre_at_x0[v + 1] / (-(2 * v + 3))
                temp4 = to_col(temp4)
                temp4 = legendre_x[v, :] * temp4
                v_function_sum += temp4
            v_function = np.array(v_function_sum)
            if v >= 1:
                yield v, (step_function, v_function)
| {
"content_hash": "47beee2d543e19b836e49c3fb8a5dadf",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 70,
"avg_line_length": 34.15068493150685,
"alnum_prop": 0.5302847974328119,
"repo_name": "jaantollander/Fourier-Legendre",
"id": "9e8960e081acd1c152a9f2417557e6fb47d507cb",
"size": "2508",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/fourier_series/legendre/series.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "109208"
}
],
"symlink_target": ""
} |
import requests
from diaryapi import Diary_API
from authloader import load_user, load_app_info
# Credentials and application keys come from local auth config files.
username, password = load_user()
appkey, key = load_app_info()
with requests.Session() as sess:
    d_api = Diary_API(sess, appkey=appkey,
                      key=key)
    # Authenticate only when no cached session id (sid) is available; bail
    # out if authentication fails, printing the API-reported error.
    if not d_api.sid and not d_api.auth(username, password):
        print(d_api.error)
        exit()
    print(d_api.sid)
    #lst = d_api.post_get()
    #lst__ = list(lst)
    #print(lst__[:2])
    #post_id = lst__[0][0]
    #lst = d_api.comment_get(post_id)
    #print(lst__[:2])
    #for post_id, post_data, comments in d_api.post_and_comments(type_='by_id', ids=['201289904',]):
    #    print(post_id)
    # Search posts for the (Russian) keyword "music" and print each URL.
    for url in d_api.find_post('музыка'):
        print(url)
| {
"content_hash": "760e109bdc3300113e0f3ee8d7900cb2",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 100,
"avg_line_length": 31.708333333333332,
"alnum_prop": 0.5795006570302234,
"repo_name": "yastrov/py-diary-api",
"id": "8062c434047a47f37f0e9efeae2612de6a7877e5",
"size": "816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15113"
}
],
"symlink_target": ""
} |
from .linear_model import yule_walker
from statsmodels import PytestTester
# Module-level test runner: ``statsmodels.regression.test()`` executes this
# subpackage's test suite via pytest.
test = PytestTester()
| {
"content_hash": "98386318e95fa11a8bb530f2f1384ed0",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 37,
"avg_line_length": 24.5,
"alnum_prop": 0.8163265306122449,
"repo_name": "ChadFulton/statsmodels",
"id": "55dc22530f382a25a806f60d60df1af6e64506ae",
"size": "98",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "statsmodels/regression/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10035"
},
{
"name": "Batchfile",
"bytes": "3469"
},
{
"name": "C",
"bytes": "381"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "MATLAB",
"bytes": "2609"
},
{
"name": "Python",
"bytes": "11749760"
},
{
"name": "R",
"bytes": "90986"
},
{
"name": "Rebol",
"bytes": "123"
},
{
"name": "Shell",
"bytes": "8181"
},
{
"name": "Smarty",
"bytes": "1014"
},
{
"name": "Stata",
"bytes": "65045"
}
],
"symlink_target": ""
} |
'''
© 2012-2013 eBay Software Foundation
Authored by: Tim Keefer
Licensed under CDDL 1.0
'''
import os
import sys
from optparse import OptionParser
# Python 2/3 compatibility: alias raw_input to input on Python 2; on
# Python 3 raw_input does not exist and the NameError is ignored.
try:
    input = raw_input
except NameError:
    pass
# Allow running this sample directly from the samples/ directory by putting
# the repository root on the import path.
sys.path.insert(0, '%s/../' % os.path.dirname(__file__))
from common import dump
import ebaysdk
from ebaysdk.exception import ConnectionError
from ebaysdk.shopping import Connection as Shopping
def init_options():
    """Parse the sample's command-line flags and return (options, args)."""
    parser = OptionParser(usage="usage: %prog [options]")
    option_specs = [
        (("-d", "--debug"),
         dict(action="store_true", dest="debug", default=False,
              help="Enabled debugging [default: %default]")),
        (("-y", "--yaml"),
         dict(dest="yaml", default='ebay.yaml',
              help="Specifies the name of the YAML defaults file. [default: %default]")),
        (("-a", "--appid"),
         dict(dest="appid", default=None,
              help="Specifies the eBay application id to use.")),
    ]
    for flags, kwargs in option_specs:
        parser.add_option(*flags, **kwargs)
    return parser.parse_args()
def run(opts):
    """Search for popular items matching 'Python' and print their titles."""
    api = Shopping(debug=opts.debug, appid=opts.appid, config_file=opts.yaml,
                   warnings=True)
    print("Shopping samples for SDK version %s" % ebaysdk.get_version())
    try:
        result = api.execute('FindPopularItems', {'QueryKeywords': 'Python'})
        dump(api)
        print("Matching Titles:")
        for popular_item in result.reply.ItemArray.Item:
            print(popular_item.Title)
    except ConnectionError as err:
        print(err)
        print(err.response.dict())
def popularSearches(opts):
    """Interactively query FindPopularSearches and show related results.

    Reads search terms from stdin until the user types 'quit'.  For each
    term, fetches popular/alternative searches, then looks up a few popular
    items for the first three alternative terms and prints their titles.
    """
    api = Shopping(debug=opts.debug, appid=opts.appid, config_file=opts.yaml,
                   warnings=True)
    choice = True
    while choice:
        choice = input('Search: ')
        if choice == 'quit':
            break
        mySearch = {
            "MaxKeywords": 10,
            "QueryKeywords": choice,
        }
        try:
            response = api.execute('FindPopularSearches', mySearch)
            dump(api, full=False)
            print("Related: %s" %
                  response.reply.PopularSearchResult.RelatedSearches)
            for term in response.reply.PopularSearchResult.AlternativeSearches.split(';')[:3]:
                # Bug fix: capture the FindPopularItems response.  Previously
                # the call's result was discarded and the stale
                # FindPopularSearches response was inspected, so no item
                # titles were ever printed (the AttributeError on the missing
                # ItemArray was silently swallowed below).
                response = api.execute('FindPopularItems', {
                    'QueryKeywords': term, 'MaxEntries': 3})
                print("Term: %s" % term)
                try:
                    for item in response.reply.ItemArray.Item:
                        print(item.Title)
                except AttributeError:
                    # A term may legitimately return no items.
                    pass
                dump(api)
            print("\n")
        except ConnectionError as e:
            print(e)
            print(e.response.dict())
def categoryInfo(opts):
    """Fetch and dump information about a single eBay category (id 3410)."""
    try:
        api = Shopping(debug=opts.debug, appid=opts.appid,
                       config_file=opts.yaml, warnings=True)
        api.execute('GetCategoryInfo', {"CategoryID": 3410})
        dump(api, full=False)
    except ConnectionError as err:
        print(err)
        print(err.response.dict())
def with_affiliate_info(opts):
    """Demonstrate attaching affiliate tracking ids to a Shopping call."""
    try:
        api = Shopping(debug=opts.debug, appid=opts.appid,
                       config_file=opts.yaml, warnings=True,
                       trackingid=1234, trackingpartnercode=9)
        api.execute('FindPopularSearches', {
            "MaxKeywords": 10,
            "QueryKeywords": 'shirt',
        })
        dump(api, full=False)
    except ConnectionError as err:
        print(err)
        print(err.response.dict())
def using_attributes(opts):
    """Call FindProducts using XML attributes (an ISBN-typed ProductID)."""
    try:
        api = Shopping(debug=opts.debug, appid=opts.appid,
                       config_file=opts.yaml, warnings=True)
        isbn_query = {"ProductID": {'@attrs': {'type': 'ISBN'},
                                    '#text': '0596154488'}}
        api.execute('FindProducts', isbn_query)
        dump(api, full=False)
    except ConnectionError as err:
        print(err)
        print(err.response.dict())
if __name__ == "__main__":
    # Run every Shopping API sample in sequence with the parsed options.
    (opts, args) = init_options()
    run(opts)
    popularSearches(opts)
    categoryInfo(opts)
    with_affiliate_info(opts)
    using_attributes(opts)
| {
"content_hash": "9457b073566a3a95d9e8fab88de1e530",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 95,
"avg_line_length": 25.431137724550897,
"alnum_prop": 0.5589828113962797,
"repo_name": "shashwatsehgal/ToLop",
"id": "703409bfc113cdc7490f596d5d6534b1ec0567bb",
"size": "4272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ebaysdk-python-master/samples/shopping.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "646697"
},
{
"name": "HTML",
"bytes": "99670"
},
{
"name": "JavaScript",
"bytes": "853379"
},
{
"name": "Python",
"bytes": "1641619"
}
],
"symlink_target": ""
} |
"""
Tests for L{twisted.trial.util}
"""
from __future__ import division, absolute_import
import os, sys
from zope.interface import implementer
from twisted.python.compat import _PY3, NativeStringIO
from twisted.python import filepath
from twisted.internet.interfaces import IProcessTransport
from twisted.internet import defer
from twisted.internet.base import DelayedCall
from twisted.python.failure import Failure
from twisted.trial.unittest import SynchronousTestCase
from twisted.trial import util
from twisted.trial.util import (
DirtyReactorAggregateError, _Janitor, excInfoOrFailureToExcInfo,
acquireAttribute)
from twisted.trial.test import suppression
class TestMktemp(SynchronousTestCase):
    """
    Tests for L{TestCase.mktemp}, a helper function for creating temporary file
    or directory names.
    """
    def test_name(self):
        """
        The path name returned by C{mktemp} is directly beneath a directory
        which identifies the test method which created the name.
        """
        tempName = self.mktemp()
        segments = os.path.dirname(tempName).split(os.sep)[:-1]
        self.assertEqual(
            segments,
            ['twisted.trial.test.test_util', 'TestMktemp', 'test_name'])

    def test_unique(self):
        """
        Repeated calls to C{mktemp} return different values.
        """
        first = self.mktemp()
        second = self.mktemp()
        self.assertNotEqual(first, second)

    def test_created(self):
        """
        The directory part of the path name returned by C{mktemp} exists.
        """
        tempName = self.mktemp()
        parent = os.path.dirname(tempName)
        self.assertTrue(os.path.exists(parent))
        self.assertFalse(os.path.exists(tempName))

    def test_location(self):
        """
        The path returned by C{mktemp} is beneath the current working directory.
        """
        absolute = os.path.abspath(self.mktemp())
        self.assertTrue(absolute.startswith(os.getcwd()))
class TestIntrospection(SynchronousTestCase):
    """
    Tests for the deprecated L{util.getPythonContainers} helper.
    """
    def test_containers(self):
        """
        When passed a test case, L{util.getPythonContainers} returns a list
        including the test case and the module the test case is defined in.
        """
        parents = util.getPythonContainers(
            suppression.SynchronousTestSuppression2.testSuppressModule)
        expected = [suppression.SynchronousTestSuppression2, suppression]
        for a, b in zip(parents, expected):
            self.assertEqual(a, b)
        # Also, the function is deprecated.
        warnings = self.flushWarnings([self.test_containers])
        self.assertEqual(DeprecationWarning, warnings[0]['category'])
        self.assertEqual(
            "twisted.trial.util.getPythonContainers was deprecated in "
            "Twisted 12.3.0: This function never worked correctly. "
            "Implement lookup on your own.",
            warnings[0]['message'])
        self.assertEqual(1, len(warnings))
    # The function under test does not exist on Python 3 at all.
    if _PY3:
        test_containers.skip = "getPythonContainers is unsupported on Python 3."
class TestRunSequentially(SynchronousTestCase):
    """
    Sometimes it is useful to be able to run an arbitrary list of callables,
    one after the other.

    When some of those callables can return Deferreds, things become complex.
    """
    def assertDeferredResult(self, deferred, assertFunction, *args, **kwargs):
        """
        Call the given assertion function against the current result of a
        Deferred.
        """
        # NOTE: assumes the Deferred has already fired synchronously;
        # result[0] would raise IndexError otherwise.
        result = []
        deferred.addCallback(result.append)
        assertFunction(result[0], *args, **kwargs)

    def test_emptyList(self):
        """
        When asked to run an empty list of callables, runSequentially returns a
        successful Deferred that fires an empty list.
        """
        d = util._runSequentially([])
        self.assertDeferredResult(d, self.assertEqual, [])

    def test_singleSynchronousSuccess(self):
        """
        When given a callable that succeeds without returning a Deferred,
        include the return value in the results list, tagged with a SUCCESS
        flag.
        """
        d = util._runSequentially([lambda: None])
        self.assertDeferredResult(d, self.assertEqual, [(defer.SUCCESS, None)])

    def test_singleSynchronousFailure(self):
        """
        When given a callable that raises an exception, include a Failure for
        that exception in the results list, tagged with a FAILURE flag.
        """
        d = util._runSequentially([lambda: self.fail('foo')])
        def check(results):
            [(flag, fail)] = results
            fail.trap(self.failureException)
            self.assertEqual(fail.getErrorMessage(), 'foo')
            self.assertEqual(flag, defer.FAILURE)
        self.assertDeferredResult(d, check)

    def test_singleAsynchronousSuccess(self):
        """
        When given a callable that returns a successful Deferred, include the
        result of the Deferred in the results list, tagged with a SUCCESS flag.
        """
        d = util._runSequentially([lambda: defer.succeed(None)])
        self.assertDeferredResult(d, self.assertEqual, [(defer.SUCCESS, None)])

    def test_singleAsynchronousFailure(self):
        """
        When given a callable that returns a failing Deferred, include the
        failure the results list, tagged with a FAILURE flag.
        """
        d = util._runSequentially([lambda: defer.fail(ValueError('foo'))])
        def check(results):
            [(flag, fail)] = results
            fail.trap(ValueError)
            self.assertEqual(fail.getErrorMessage(), 'foo')
            self.assertEqual(flag, defer.FAILURE)
        self.assertDeferredResult(d, check)

    def test_callablesCalledInOrder(self):
        """
        Check that the callables are called in the given order, one after the
        other.
        """
        log = []
        deferreds = []
        def append(value):
            d = defer.Deferred()
            log.append(value)
            deferreds.append(d)
            return d
        util._runSequentially([lambda: append('foo'),
                               lambda: append('bar')])
        # runSequentially should wait until the Deferred has fired before
        # running the second callable.
        self.assertEqual(log, ['foo'])
        deferreds[-1].callback(None)
        self.assertEqual(log, ['foo', 'bar'])

    def test_continuesAfterError(self):
        """
        If one of the callables raises an error, then runSequentially continues
        to run the remaining callables.
        """
        d = util._runSequentially([lambda: self.fail('foo'), lambda: 'bar'])
        def check(results):
            [(flag1, fail), (flag2, result)] = results
            fail.trap(self.failureException)
            self.assertEqual(flag1, defer.FAILURE)
            self.assertEqual(fail.getErrorMessage(), 'foo')
            self.assertEqual(flag2, defer.SUCCESS)
            self.assertEqual(result, 'bar')
        self.assertDeferredResult(d, check)

    def test_stopOnFirstError(self):
        """
        If the C{stopOnFirstError} option is passed to C{runSequentially}, then
        no further callables are called after the first exception is raised.
        """
        d = util._runSequentially([lambda: self.fail('foo'), lambda: 'bar'],
                                  stopOnFirstError=True)
        def check(results):
            # Only the failing callable's entry is present; 'bar' never ran.
            [(flag1, fail)] = results
            fail.trap(self.failureException)
            self.assertEqual(flag1, defer.FAILURE)
            self.assertEqual(fail.getErrorMessage(), 'foo')
        self.assertDeferredResult(d, check)
class DirtyReactorAggregateErrorTest(SynchronousTestCase):
    """
    Tests for the L{DirtyReactorAggregateError}.
    """
    def test_formatDelayedCall(self):
        """
        Delayed calls are formatted nicely.
        """
        error = DirtyReactorAggregateError(["Foo", "bar"])
        # The expected text is left-aligned: the string literal's content
        # intentionally starts at column 0.
        self.assertEqual(str(error),
                         """\
Reactor was unclean.
DelayedCalls: (set twisted.internet.base.DelayedCall.debug = True to debug)
Foo
bar""")

    def test_formatSelectables(self):
        """
        Selectables are formatted nicely.
        """
        error = DirtyReactorAggregateError([], ["selectable 1", "selectable 2"])
        self.assertEqual(str(error),
                         """\
Reactor was unclean.
Selectables:
selectable 1
selectable 2""")

    def test_formatDelayedCallsAndSelectables(self):
        """
        Both delayed calls and selectables can appear in the same error.
        """
        error = DirtyReactorAggregateError(["bleck", "Boozo"],
                                           ["Sel1", "Sel2"])
        self.assertEqual(str(error),
                         """\
Reactor was unclean.
DelayedCalls: (set twisted.internet.base.DelayedCall.debug = True to debug)
bleck
Boozo
Selectables:
Sel1
Sel2""")
class StubReactor(object):
    """
    A minimal reactor stand-in, with just enough behavior to drive the
    L{_Janitor} tests.

    @ivar iterations: The arguments passed to each L{iterate} call, in order.
    @ivar removeAllCalled: How many times L{removeAll} has been invoked.
    @ivar selectables: The value handed back by L{removeAll}.
    @ivar delayedCalls: The value handed back by L{getDelayedCalls}.
    """

    def __init__(self, delayedCalls, selectables=None):
        """
        @param delayedCalls: See L{StubReactor.delayedCalls}.
        @param selectables: See L{StubReactor.selectables}.
        """
        self.delayedCalls = delayedCalls
        self.iterations = []
        self.removeAllCalled = 0
        # Treat a missing/falsy selectables argument as "none".
        self.selectables = selectables if selectables else []

    def iterate(self, timeout=None):
        """
        Record C{timeout} in C{self.iterations}.
        """
        self.iterations.append(timeout)

    def getDelayedCalls(self):
        """
        Return the delayed calls supplied at construction time.
        """
        return self.delayedCalls

    def removeAll(self):
        """
        Count the invocation and return C{self.selectables}.
        """
        self.removeAllCalled += 1
        return self.selectables
class StubErrorReporter(object):
    """
    A fragment of L{twisted.trial.itrial.IReporter} that simply remembers
    every L{addError} invocation.

    @ivar errors: Two-tuples of (test, error) passed to L{addError}.
    """

    def __init__(self):
        self.errors = []

    def addError(self, test, error):
        """
        Remember C{(test, error)} in C{self.errors}.
        """
        self.errors.append((test, error))
class JanitorTests(SynchronousTestCase):
    """
    Tests for L{_Janitor}!
    """

    def test_cleanPendingSpinsReactor(self):
        """
        During pending-call cleanup, the reactor will be spun twice with an
        instant timeout. This is not a requirement, it is only a test for
        current behavior. Hopefully Trial will eventually not do this kind of
        reactor stuff.
        """
        reactor = StubReactor([])
        jan = _Janitor(None, None, reactor=reactor)
        jan._cleanPending()
        # Two spins, each with a zero (instant) timeout.
        self.assertEqual(reactor.iterations, [0, 0])

    def test_cleanPendingCancelsCalls(self):
        """
        During pending-call cleanup, the janitor cancels pending timed calls.
        """
        def func():
            return "Lulz"

        cancelled = []
        # cancelled.append is wired up as the DelayedCall's cancel callback.
        delayedCall = DelayedCall(300, func, (), {},
                                  cancelled.append, lambda x: None)
        reactor = StubReactor([delayedCall])
        jan = _Janitor(None, None, reactor=reactor)
        jan._cleanPending()
        self.assertEqual(cancelled, [delayedCall])

    def test_cleanPendingReturnsDelayedCallStrings(self):
        """
        The Janitor produces string representations of delayed calls from the
        delayed call cleanup method. It gets the string representations
        *before* cancelling the calls; this is important because cancelling the
        call removes critical debugging information from the string
        representation.
        """
        delayedCall = DelayedCall(300, lambda: None, (), {},
                                  lambda x: None, lambda x: None,
                                  seconds=lambda: 0)
        # Capture the repr before cancellation for comparison below.
        delayedCallString = str(delayedCall)
        reactor = StubReactor([delayedCall])
        jan = _Janitor(None, None, reactor=reactor)
        strings = jan._cleanPending()
        self.assertEqual(strings, [delayedCallString])

    def test_cleanReactorRemovesSelectables(self):
        """
        The Janitor will remove selectables during reactor cleanup.
        """
        reactor = StubReactor([])
        jan = _Janitor(None, None, reactor=reactor)
        jan._cleanReactor()
        self.assertEqual(reactor.removeAllCalled, 1)

    def test_cleanReactorKillsProcesses(self):
        """
        The Janitor will kill processes during reactor cleanup.
        """
        @implementer(IProcessTransport)
        class StubProcessTransport(object):
            """
            A stub L{IProcessTransport} provider which records signals.

            @ivar signals: The signals passed to L{signalProcess}.
            """
            def __init__(self):
                self.signals = []

            def signalProcess(self, signal):
                """
                Append C{signal} to C{self.signals}.
                """
                self.signals.append(signal)

        pt = StubProcessTransport()
        reactor = StubReactor([], [pt])
        jan = _Janitor(None, None, reactor=reactor)
        jan._cleanReactor()
        self.assertEqual(pt.signals, ["KILL"])

    def test_cleanReactorReturnsSelectableStrings(self):
        """
        The Janitor returns string representations of the selectables that it
        cleaned up from the reactor cleanup method.
        """
        class Selectable(object):
            """
            A stub Selectable which only has an interesting string
            representation.
            """
            def __repr__(self):
                return "(SELECTABLE!)"

        reactor = StubReactor([], [Selectable()])
        jan = _Janitor(None, None, reactor=reactor)
        self.assertEqual(jan._cleanReactor(), ["(SELECTABLE!)"])

    def test_postCaseCleanupNoErrors(self):
        """
        The post-case cleanup method will return True and not call C{addError}
        on the result if there are no pending calls.
        """
        reactor = StubReactor([])
        test = object()
        reporter = StubErrorReporter()
        jan = _Janitor(test, reporter, reactor=reactor)
        self.assertTrue(jan.postCaseCleanup())
        self.assertEqual(reporter.errors, [])

    def test_postCaseCleanupWithErrors(self):
        """
        The post-case cleanup method will return False and call C{addError} on
        the result with a L{DirtyReactorAggregateError} Failure if there are
        pending calls.
        """
        delayedCall = DelayedCall(300, lambda: None, (), {},
                                  lambda x: None, lambda x: None,
                                  seconds=lambda: 0)
        delayedCallString = str(delayedCall)
        reactor = StubReactor([delayedCall], [])
        test = object()
        reporter = StubErrorReporter()
        jan = _Janitor(test, reporter, reactor=reactor)
        self.assertFalse(jan.postCaseCleanup())
        self.assertEqual(len(reporter.errors), 1)
        # errors[0][1] is the Failure; its value carries the formatted calls.
        self.assertEqual(reporter.errors[0][1].value.delayedCalls,
                         [delayedCallString])

    def test_postClassCleanupNoErrors(self):
        """
        The post-class cleanup method will not call C{addError} on the result
        if there are no pending calls or selectables.
        """
        reactor = StubReactor([])
        test = object()
        reporter = StubErrorReporter()
        jan = _Janitor(test, reporter, reactor=reactor)
        jan.postClassCleanup()
        self.assertEqual(reporter.errors, [])

    def test_postClassCleanupWithPendingCallErrors(self):
        """
        The post-class cleanup method call C{addError} on the result with a
        L{DirtyReactorAggregateError} Failure if there are pending calls.
        """
        delayedCall = DelayedCall(300, lambda: None, (), {},
                                  lambda x: None, lambda x: None,
                                  seconds=lambda: 0)
        delayedCallString = str(delayedCall)
        reactor = StubReactor([delayedCall], [])
        test = object()
        reporter = StubErrorReporter()
        jan = _Janitor(test, reporter, reactor=reactor)
        jan.postClassCleanup()
        self.assertEqual(len(reporter.errors), 1)
        self.assertEqual(reporter.errors[0][1].value.delayedCalls,
                         [delayedCallString])

    def test_postClassCleanupWithSelectableErrors(self):
        """
        The post-class cleanup method call C{addError} on the result with a
        L{DirtyReactorAggregateError} Failure if there are selectables.
        """
        selectable = "SELECTABLE HERE"
        reactor = StubReactor([], [selectable])
        test = object()
        reporter = StubErrorReporter()
        jan = _Janitor(test, reporter, reactor=reactor)
        jan.postClassCleanup()
        self.assertEqual(len(reporter.errors), 1)
        # Selectables are reported by their repr().
        self.assertEqual(reporter.errors[0][1].value.selectables,
                         [repr(selectable)])
class RemoveSafelyTests(SynchronousTestCase):
    """
    Tests for L{util._removeSafely}.
    """

    def test_removeSafelyNoTrialMarker(self):
        """
        If a path doesn't contain a node named C{"_trial_marker"}, that path is
        not removed by L{util._removeSafely} and a L{util._NoTrialMarker}
        exception is raised instead.
        """
        directory = self.mktemp().encode("utf-8")
        os.mkdir(directory)
        dirPath = filepath.FilePath(directory)
        self.assertRaises(util._NoTrialMarker, util._removeSafely, dirPath)

    def test_removeSafelyRemoveFailsMoveSucceeds(self):
        """
        If an L{OSError} is raised while removing a path in
        L{util._removeSafely}, an attempt is made to move the path to a new
        name.
        """
        def dummyRemove():
            """
            Raise an C{OSError} to emulate the branch of L{util._removeSafely}
            in which path removal fails.
            """
            raise OSError()

        # Patch stdout so we can check the print statements in _removeSafely
        out = NativeStringIO()
        self.patch(sys, 'stdout', out)

        # Set up a trial directory with a _trial_marker
        directory = self.mktemp().encode("utf-8")
        os.mkdir(directory)
        dirPath = filepath.FilePath(directory)
        dirPath.child(b'_trial_marker').touch()
        # Ensure that path.remove() raises an OSError
        dirPath.remove = dummyRemove

        # Removal fails, the fallback move succeeds, and a notice is printed.
        util._removeSafely(dirPath)
        self.assertIn("could not remove FilePath", out.getvalue())

    def test_removeSafelyRemoveFailsMoveFails(self):
        """
        If an L{OSError} is raised while removing a path in
        L{util._removeSafely}, an attempt is made to move the path to a new
        name. If that attempt fails, the L{OSError} is re-raised.
        """
        def dummyRemove():
            """
            Raise an C{OSError} to emulate the branch of L{util._removeSafely}
            in which path removal fails.
            """
            raise OSError("path removal failed")

        def dummyMoveTo(path):
            """
            Raise an C{OSError} to emulate the branch of L{util._removeSafely}
            in which path movement fails.
            """
            raise OSError("path movement failed")

        # Patch stdout so we can check the print statements in _removeSafely
        out = NativeStringIO()
        self.patch(sys, 'stdout', out)

        # Set up a trial directory with a _trial_marker
        directory = self.mktemp().encode("utf-8")
        os.mkdir(directory)
        dirPath = filepath.FilePath(directory)
        dirPath.child(b'_trial_marker').touch()
        # Ensure that path.remove() and path.moveTo() both raise OSErrors
        dirPath.remove = dummyRemove
        dirPath.moveTo = dummyMoveTo

        # The second OSError (from moveTo) is the one that propagates.
        error = self.assertRaises(OSError, util._removeSafely, dirPath)
        self.assertEqual(str(error), "path movement failed")
        self.assertIn("could not remove FilePath", out.getvalue())
class ExcInfoTests(SynchronousTestCase):
    """
    Tests for L{excInfoOrFailureToExcInfo}.
    """

    def test_excInfo(self):
        """
        L{excInfoOrFailureToExcInfo} returns exactly what it is passed, if it is
        passed a tuple like the one returned by L{sys.exc_info}.
        """
        info = (ValueError, ValueError("foo"), None)
        # Identity, not mere equality: the very same tuple object comes back.
        self.assertTrue(info is excInfoOrFailureToExcInfo(info))

    def test_failure(self):
        """
        When called with a L{Failure} instance, L{excInfoOrFailureToExcInfo}
        returns a tuple like the one returned by L{sys.exc_info}, with the
        elements taken from the type, value, and traceback of the failure.
        """
        try:
            1 / 0
        except:
            # Bare except is deliberate here: capture the active exception
            # into a Failure while its traceback is still accessible.
            f = Failure()
        self.assertEqual((f.type, f.value, f.tb), excInfoOrFailureToExcInfo(f))
class AcquireAttributeTests(SynchronousTestCase):
    """
    Tests for L{acquireAttribute}.
    """

    def test_foundOnEarlierObject(self):
        """
        The value returned by L{acquireAttribute} is the value of the requested
        attribute on the first object in the list passed in which has that
        attribute.
        """
        # Store a sentinel on this test case so it can act as the earlier
        # object in the lookup list.
        self.value = value = object()
        self.assertTrue(value is acquireAttribute([self, object()], "value"))

    def test_foundOnLaterObject(self):
        """
        The same as L{test_foundOnEarlierObject}, but for the case where the 2nd
        element in the object list has the attribute and the first does not.
        """
        self.value = value = object()
        self.assertTrue(value is acquireAttribute([object(), self], "value"))

    def test_notFoundException(self):
        """
        If none of the objects passed in the list to L{acquireAttribute} have
        the requested attribute, L{AttributeError} is raised.
        """
        self.assertRaises(AttributeError, acquireAttribute, [object()], "foo")

    def test_notFoundDefault(self):
        """
        If none of the objects passed in the list to L{acquireAttribute} have
        the requested attribute and a default value is given, the default value
        is returned.
        """
        default = object()
        self.assertTrue(default is acquireAttribute([object()], "foo", default))
class TestListToPhrase(SynchronousTestCase):
    """
    Input is transformed into a string representation of the list,
    with each item separated by delimiter (defaulting to a comma) and the final
    two being separated by a final delimiter.
    """

    def test_empty(self):
        """
        If things is empty, an empty string is returned.
        """
        sample = []
        expected = ''
        result = util._listToPhrase(sample, 'and')
        self.assertEqual(expected, result)

    def test_oneWord(self):
        """
        With a single item, the item is returned.
        """
        sample = ['One']
        expected = 'One'
        result = util._listToPhrase(sample, 'and')
        self.assertEqual(expected, result)

    def test_twoWords(self):
        """
        Two words are separated by the final delimiter.
        """
        sample = ['One', 'Two']
        expected = 'One and Two'
        result = util._listToPhrase(sample, 'and')
        self.assertEqual(expected, result)

    def test_threeWords(self):
        """
        With more than two words, the first two are separated by the delimiter.
        """
        sample = ['One', 'Two', 'Three']
        expected = 'One, Two, and Three'
        result = util._listToPhrase(sample, 'and')
        self.assertEqual(expected, result)

    def test_fourWords(self):
        """
        If a delimiter is specified, it is used instead of the default comma.
        """
        sample = ['One', 'Two', 'Three', 'Four']
        expected = 'One; Two; Three; or Four'
        result = util._listToPhrase(sample, 'or', delimiter='; ')
        self.assertEqual(expected, result)

    def test_notString(self):
        """
        If something in things is not a string, it is converted into one.
        """
        sample = [1, 2, 'three']
        expected = '1, 2, and three'
        result = util._listToPhrase(sample, 'and')
        self.assertEqual(expected, result)

    def test_stringTypeError(self):
        """
        If things is a string, a TypeError is raised.
        """
        sample = "One, two, three"
        error = self.assertRaises(TypeError, util._listToPhrase, sample, 'and')
        self.assertEqual(str(error), "Things must be a list or a tuple")

    def test_iteratorTypeError(self):
        """
        If things is an iterator, a TypeError is raised.
        """
        sample = iter([1, 2, 3])
        error = self.assertRaises(TypeError, util._listToPhrase, sample, 'and')
        self.assertEqual(str(error), "Things must be a list or a tuple")

    def test_generatorTypeError(self):
        """
        If things is a generator, a TypeError is raised.
        """
        def sample():
            for i in range(2):
                yield i

        # Note: the generator *function* itself is passed, not a generator
        # object; either way it is not a list or tuple and must be rejected.
        error = self.assertRaises(TypeError, util._listToPhrase, sample, 'and')
        self.assertEqual(str(error), "Things must be a list or a tuple")
| {
"content_hash": "485ce6bee0c17bbf9539cb3db774e533",
"timestamp": "",
"source": "github",
"line_count": 777,
"max_line_length": 80,
"avg_line_length": 34.15958815958816,
"alnum_prop": 0.5901966694295833,
"repo_name": "timkrentz/SunTracker",
"id": "29497e5c87381218ca6c2ce019a8e100677d6f1c",
"size": "26619",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/trial/test/test_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "185699"
},
{
"name": "Assembly",
"bytes": "38582"
},
{
"name": "Batchfile",
"bytes": "110"
},
{
"name": "C",
"bytes": "48362836"
},
{
"name": "C++",
"bytes": "70478135"
},
{
"name": "CMake",
"bytes": "1755036"
},
{
"name": "CSS",
"bytes": "147795"
},
{
"name": "Cuda",
"bytes": "30026"
},
{
"name": "D",
"bytes": "2152"
},
{
"name": "GAP",
"bytes": "14495"
},
{
"name": "GLSL",
"bytes": "190912"
},
{
"name": "Groff",
"bytes": "66799"
},
{
"name": "HTML",
"bytes": "295090"
},
{
"name": "Java",
"bytes": "203238"
},
{
"name": "JavaScript",
"bytes": "1146098"
},
{
"name": "Lex",
"bytes": "47145"
},
{
"name": "Makefile",
"bytes": "5461"
},
{
"name": "Objective-C",
"bytes": "74727"
},
{
"name": "Objective-C++",
"bytes": "265817"
},
{
"name": "Pascal",
"bytes": "3407"
},
{
"name": "Perl",
"bytes": "178176"
},
{
"name": "Prolog",
"bytes": "4556"
},
{
"name": "Python",
"bytes": "16497901"
},
{
"name": "Shell",
"bytes": "48835"
},
{
"name": "Smarty",
"bytes": "1368"
},
{
"name": "Tcl",
"bytes": "1955829"
},
{
"name": "Yacc",
"bytes": "180651"
}
],
"symlink_target": ""
} |
"""
Presence analyzer unit tests.
"""
import os.path
import json
import datetime
import unittest
from presence_analyzer import main, utils
# Paths to the CSV/XML fixtures used by these tests, resolved relative to
# this file so the suite can be run from any working directory.
TEST_DATA_CSV = os.path.join(
    os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'test_data.csv'
)
TEST_USERS_XML = os.path.join(
    os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'test_users.xml'
)

# pylint: disable=E1103
class PresenceAnalyzerViewsTestCase(unittest.TestCase):
    """
    Views tests.
    """

    def setUp(self):
        """
        Before each test, set up an environment.
        """
        main.app.config.update({'DATA_CSV': TEST_DATA_CSV})
        self.client = main.app.test_client()

    def tearDown(self):
        """
        Get rid of unused objects after each test.
        """
        pass

    def test_mainpage(self):
        """
        Test main page redirect.
        """
        resp = self.client.get('/')
        self.assertEqual(resp.status_code, 200)
        # An unknown page yields a 404.
        resp = self.client.get('/foo.html')
        self.assertEqual(resp.status_code, 404)

    def test_presence_mean_time_weekday(self):
        """
        Test the presence mean time page.
        """
        resp = self.client.get('/mean_time_weekday.html')
        self.assertEqual(resp.status_code, 200)
        resp = self.client.get('/bar.html')
        self.assertEqual(resp.status_code, 404)

    def test_presence_start_end(self):
        """
        Test the presence start-end page.
        """
        resp = self.client.get('/presence_start_end.html')
        self.assertEqual(resp.status_code, 200)
        resp = self.client.get('/foo_bar.html')
        self.assertEqual(resp.status_code, 404)

    def test_api_users(self):
        """
        Test users listing.
        """
        resp = self.client.get('/api/v1/users')
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.content_type, 'application/json')
        # The fixture contains 8 users; the first one is fixed.
        data = json.loads(resp.data)
        self.assertEqual(len(data), 8)
        self.assertDictEqual(data[0], {u'user_id': 141, u'name': u'Adam P.'})
class PresenceAnalyzerUtilsTestCase(unittest.TestCase):
    """
    Utility functions tests.
    """

    def setUp(self):
        """
        Before each test, set up an environment.
        """
        main.app.config.update({'DATA_CSV': TEST_DATA_CSV})
        main.app.config.update({'USERS_XML': TEST_USERS_XML})

    def tearDown(self):
        """
        Get rid of unused objects after each test.
        """
        pass

    def test_get_data(self):
        """
        Test parsing of the CSV file.
        """
        data = utils.get_data()
        self.assertIsInstance(data, dict)
        # The fixture contains only users 10 and 11.
        self.assertItemsEqual(data.keys(), [10, 11])
        sample_date = datetime.date(2013, 9, 10)
        self.assertIn(sample_date, data[10])
        self.assertItemsEqual(data[10][sample_date].keys(), ['start', 'end'])
        self.assertEqual(data[10][sample_date]['start'],
                         datetime.time(9, 39, 5))

    def test_mean(self):
        """
        Test calculating the arithmetic mean.
        """
        self.assertIsInstance(utils.mean([1, 2, 3]), float)
        self.assertEqual(utils.mean([1, 2, 3]), 2)
        self.assertEqual(utils.mean([-10, 10]), 0)

    def test_seconds_since_midnight(self):
        """
        Test calculating the amount of seconds since midnight.
        """
        self.assertIsInstance(utils.seconds_since_midnight(
            datetime.datetime.now()), int)
        # 2h 30m 15s == 9015 seconds.
        self.assertEqual(
            utils.seconds_since_midnight(datetime.time(2, 30, 15)), 9015)

    def test_interval(self):
        """
        Test calculating the interval between two datetime objects in seconds.
        """
        start = datetime.datetime.now()
        end = datetime.datetime.now() + datetime.timedelta(hours=1)
        self.assertIsInstance(utils.interval(start, end), int)
        self.assertEqual(utils.interval(start, end), 3600)

    def test_group_by_weekday(self):
        """
        Test grouping presence entries by weekday.
        """
        sample_data = utils.get_data()
        grouped_sample = utils.group_by_weekday(sample_data[10])
        # Weekdays are keyed 0 (Monday) through 6 (Sunday).
        expected_result_for_empty_dict = {i: [] for i in range(7)}
        expected_result_for_grouped_sample = {
            0: [],
            1: [30047],
            2: [24465],
            3: [23705],
            4: [],
            5: [],
            6: []
        }
        self.assertEqual(len(grouped_sample), 7)
        self.assertIsInstance(grouped_sample, dict)
        self.assertEqual(
            utils.group_by_weekday({}), expected_result_for_empty_dict)
        self.assertEqual(grouped_sample, expected_result_for_grouped_sample)

    def test_group_start_end_by_weekday(self):
        """
        Test grouping start and end times by weekday.
        """
        # Seconds-since-midnight values taken from the fixture for user 10.
        expected_result = {
            0: {
                'starts': [], 'ends': []
            },
            1: {
                'starts': [34745], 'ends': [64792]
            },
            2: {
                'starts': [33592], 'ends': [58057]
            },
            3: {
                'starts': [38926], 'ends': [62631]
            },
            4: {
                'starts': [], 'ends': []
            },
            5: {
                'starts': [], 'ends': []
            },
            6: {
                'starts': [], 'ends': []
            }
        }
        data = utils.get_data()
        sample_data = utils.group_start_end_by_weekday(data[10])
        self.assertIsInstance(sample_data, dict)
        self.assertEqual(len(sample_data), 7)
        self.assertEqual(sample_data, expected_result)

    def test_parse_users_xml(self):
        """
        Test the XML users parser.
        """
        parsed_data = utils.parse_users_xml()
        expected_result = {'user_id': 19, 'name': 'Anna K.'}
        self.assertEqual(len(parsed_data), 8)
        self.assertIsInstance(parsed_data, list)
        self.assertEqual(parsed_data[5], expected_result)
def suite():
    """
    Build the default test suite containing every test case of this module.
    """
    # Use a distinct local name so the function's own name is not shadowed.
    result = unittest.TestSuite()
    for case in (PresenceAnalyzerViewsTestCase, PresenceAnalyzerUtilsTestCase):
        result.addTest(unittest.makeSuite(case))
    return result


if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "a567cce03cac8ce0508d04e62f0cb026",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 78,
"avg_line_length": 29.483568075117372,
"alnum_prop": 0.5472929936305733,
"repo_name": "stxnext-kindergarten/presence-analyzer-apardon",
"id": "c2826777cf7991d701281404cb2635c78639c92c",
"size": "6304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/presence_analyzer/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "927"
},
{
"name": "Python",
"bytes": "23913"
}
],
"symlink_target": ""
} |
import os
from kivy.app import App
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.scrollview import ScrollView
from kivy.uix.treeview import TreeView, TreeViewNode
from kivy.uix.treeview import TreeViewLabel
from kivy.uix.popup import Popup
from Journal import *
class JournalOverviewScreen( Screen ):
    """
    Screen showing a read-only tree overview of the training journal, plus a
    row of controls to switch to a different journal file and a button to go
    back to the progress screen.
    """

    def __init__( self, **kwargs ):
        """
        Build the static widget tree: a scrollable TreeView, the
        journal-file selection row and a 'Back' button.
        """
        super( JournalOverviewScreen, self ).__init__( **kwargs )
        v_layout = BoxLayout( orientation = 'vertical',
                              spacing = 30 )
        self.tree_view = TreeView( root_options = dict( text = 'Tree One' ),
                                   hide_root = True,
                                   indent_level = 4 )
        # Let the tree grow with its content so the ScrollView can scroll it.
        self.tree_view.size_hint = ( 1, None )
        self.tree_view.bind( minimum_height = self.tree_view.setter( 'height' ) )
        scroll = ScrollView( pos = (0, 0) )
        scroll.add_widget( self.tree_view )
        v_layout.add_widget( scroll )
        change_journal_layout = BoxLayout( orientation = 'horizontal',
                                           spacing = 5,
                                           size_hint_y = 0.1 )
        change_journal_layout.add_widget(
            Label( text = 'Journal file:' ) )
        self.journal_file_input = TextInput(
            text = App.get_running_app().journal_file )
        change_journal_layout.add_widget( self.journal_file_input )
        change_journal_layout.add_widget(
            Button( text = 'Set another',
                    on_press = self.set_another_journal ) )
        v_layout.add_widget( change_journal_layout )
        back_to_progress_button = Button( text = 'Back',
                                          size_hint_y = 0.2 )
        back_to_progress_button.on_press = self.goto_progress
        v_layout.add_widget( back_to_progress_button )
        self.add_widget( v_layout )

    def on_pre_enter( self ):
        """Refill the tree just before the screen becomes visible."""
        self.populate_tree_view()

    def on_leave( self ):
        """Drop all tree nodes so the next visit starts from a clean tree."""
        for node in self.tree_view.iterate_all_nodes():
            self.tree_view.remove_node( node )

    def populate_tree_view( self ):
        """
        Fill the tree with one node per training; each training node gets
        child nodes for its description fields, exercises and comments.
        """
        journal = App.get_running_app().journal
        journal_node = self.tree_view.add_node(
            TreeViewLabel( text = 'Journal', is_open = True ) )
        for training in journal.trainings:
            training_node = self.tree_view.add_node(
                TreeViewLabel( text = training.description['date'] + ': Training' ),
                journal_node )
            label_str = 'Start time: ' + \
                        str( training.description['start_time'] )
            #training.description['start_time'].strftime('%H:%M:%S')
            self.tree_view.add_node(
                TreeViewLabel( text = label_str ), training_node )
            label_str = 'End time: ' + \
                        str( training.description['end_time'] )
            self.tree_view.add_node(
                TreeViewLabel( text = label_str ), training_node )
            label_str = 'Duration: ' + \
                        str( training.description['duration'] )
            self.tree_view.add_node(
                TreeViewLabel( text = label_str ), training_node )
            label_str = 'Training program: ' + \
                        str( training.description['training_program'] )
            self.tree_view.add_node(
                TreeViewLabel( text = label_str ), training_node )
            label_str = 'Training index in program: ' + \
                        str( training.description['training_index_in_program'] )
            self.tree_view.add_node(
                TreeViewLabel( text = label_str ), training_node )
            for exercise in training.exercises:
                title_node_text = 'Exercise: ' + exercise.description['name']
                # NOTE(review): description.get('type') returns None when the
                # 'type' key is missing, which would make the 'in' test raise
                # TypeError -- confirm every entry carries a 'type'.
                if 'Metric' in exercise.description.get( 'type' ):
                    title_node_text = 'Metric: ' + exercise.description['name']
                exc_node = self.tree_view.add_node(
                    TreeViewLabel( text = title_node_text ), training_node )
                for essential_field in exercise.essential_fields:
                    label_str = essential_field + \
                                ': ' + str( exercise.description[essential_field] )
                    self.tree_view.add_node(
                        TreeViewLabel( text = label_str ), exc_node )
                if exercise.description['comment']:
                    label_str = 'Comment: ' + \
                                str( exercise.description['comment'] )
                    self.tree_view.add_node(
                        TreeViewLabel( text = label_str ), exc_node )
            if training.description['comment']:
                label_str = 'Comment: ' + \
                            str( training.description['comment'] )
                self.tree_view.add_node(
                    TreeViewLabel( text = label_str ), training_node )

    def goto_progress( self ):
        """Return to the progress screen."""
        self.parent.current = 'view_progress'

    def set_another_journal( self, *rest ):
        """
        Switch the app to the journal file typed into the input field.

        Rejects non-'.json' paths with a popup; otherwise loads (or creates)
        the journal, persists the config and rebuilds the tree.
        """
        app = App.get_running_app()
        journal_file = self.journal_file_input.text
        filename, file_extension = os.path.splitext( journal_file )
        if file_extension != '.json':
            self.show_not_json_popup()
            return
        if journal_file != app.journal_file:
            app.journal_file = journal_file
            if os.path.isfile( app.journal_file ):
                app.journal = Journal.load_journal( app.journal_file )
            else:
                app.journal = Journal()
            app.write_config()
        # Rebuild the tree so it reflects the (possibly new) journal.
        for node in self.tree_view.iterate_all_nodes():
            self.tree_view.remove_node( node )
        self.populate_tree_view()

    def show_not_json_popup( self ):
        """
        Display a modal popup explaining that the journal file must have a
        '.json' extension.
        """
        popup_content = BoxLayout( orientation = 'vertical' )
        popup_content.add_widget( Label(
            text = 'The journal file is expected ' +
            'to have a ".json" extension.\n' + 'Please, specify ' +
            'another file.',
            halign = 'center') )  # fixed: was 'haling', a typo for 'halign'
        close_btn = Button( text = 'Ok', size_hint_y = 0.2 )
        popup_content.add_widget( close_btn )
        popup = Popup(title = 'Error: journal file is not JSON',
                      content = popup_content,
                      size_hint=( None, None ), size=(400, 400) )
        close_btn.bind( on_press = popup.dismiss )
        popup.open()
| {
"content_hash": "4b7c1922fa979587cdbf11baf93f7461",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 84,
"avg_line_length": 46.843971631205676,
"alnum_prop": 0.5417108251324754,
"repo_name": "noooway/exj",
"id": "bf7cc369dc93736404fbc8f0d804a5d733fe42c0",
"size": "6605",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "JournalOverviewScreen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "84514"
}
],
"symlink_target": ""
} |
"""
Copyright 2010 Daniel Graziotin <daniel.graziotin@acm.org>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
This module holds the Modality model
"""
from django.db import models, IntegrityError
import copy
# Allowed kinds of transportation for Modality.kind; the first entry
# ('auto') doubles as the field's default value.
MODE_CHOICES = (
    (u'auto', u'Auto'),
    (u'van', u'Van'),
    (u'bus', u'Bus'),
)
class Modality(models.Model):
    """
    Represents additional information about the modality of transportation being used.
    See `Modality <http://dycapo.org/Protocol#Modality>`_ for more info.
    """
    # Kind of vehicle; restricted to MODE_CHOICES, defaults to the first
    # choice ('auto').
    kind = models.CharField(max_length=255, choices=MODE_CHOICES, blank=False, null=False, default=MODE_CHOICES[0][0])
    capacity = models.PositiveIntegerField(blank=False, null=False, default=0)
    vacancy = models.IntegerField(blank=False, null=False, default=0)
    make = models.CharField(max_length=255, blank=True)
    model_name = models.CharField(max_length=255, blank=True)
    year = models.PositiveIntegerField(blank=True, null=False, default=0)
    color = models.CharField(max_length=255, blank=True)
    lic = models.CharField(max_length=255, blank=True)
    cost = models.FloatField(blank=True, null=False, default=0.00)
    person = models.ForeignKey('Person', blank=True, null=True)
    href = models.URLField(verify_exists=False, blank=True, null=False)

    def save(self, * args, ** kwargs):
        # Enforce that the descriptive attributes are filled in before
        # persisting.
        # NOTE(review): 'not self.capacity' also rejects capacity == 0, which
        # is the field's declared default -- confirm zero capacity is really
        # meant to be invalid.
        if not self.kind or not self.capacity or self.vacancy < 0 or not self.make or not self.model_name:
            raise IntegrityError('Attributes kind, capacity, vacancy, make, model_name MUST be given.')
        super(Modality, self).save(*args, **kwargs)

    def to_xmlrpc(self):
        """
        Returns a Python dict that contains just the attributes we want to expose
        in out XML-RPC methods
        """
        # Deep-copy the instance dict, then strip the fields that are
        # internal (primary key and Django model state).
        mode_dict = copy.deepcopy(self.__dict__)
        del mode_dict['id']
        del mode_dict['_state']
        return mode_dict

    class Meta:
        app_label = 'server'
| {
"content_hash": "9b604231c7fa04f904849c64b4912ffd",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 118,
"avg_line_length": 38.25,
"alnum_prop": 0.6887254901960784,
"repo_name": "dgraziotin/dycapo",
"id": "89b478576f2261f45b895c1266329805c0425331",
"size": "2448",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/models/modality.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "578500"
}
],
"symlink_target": ""
} |
import Cookie
import os
from django.conf import settings
from common.tests import ViewTestCase
from common import api
from common import clean
from common import util
class JoinTest(ViewTestCase):
    """
    Tests for the signup (/join) form and its validation.
    """

    def setUp(self):
        super(JoinTest, self).setUp()
        # A fully valid signup form; individual tests override one field at
        # a time to provoke a specific validation failure.
        self.form_data = {'nick': 'johndoe',
                          'first_name': 'John',
                          'last_name': 'Doe',
                          'email': 'johndoe@google.com',
                          'password': 'good*password',
                          'confirm': 'good*password',
                          'hide': '1',
                          #'invite': ''
                          }

    def tearDown(self):
        self.form_data = None

    def assert_join_validation_error(self, response, content):
        # Helper: the join page is re-rendered with a form error that
        # contains C{content}.
        self.assertContains(response, content)
        self.assertTemplateUsed(response, 'join.html')
        self.assertTemplateUsed(response, 'form_error.html')

    def test_join_page(self):
        r = self.client.get('/join')
        self.assertContains(r, 'SIGN UP')
        self.assertTemplateUsed(r, 'join.html')

    def test_join_with_valid_data(self):
        r = self.client.post('/join', self.form_data)
        r = self.assertRedirectsPrefix(r, '/welcome')

    def test_join_with_invalid_email(self):
        self.form_data['email'] = 'invalid'
        r = self.client.post('/join', self.form_data)
        self.assert_join_validation_error(r, 'supply a valid email address')

    def test_join_with_used_email(self):
        self.form_data['email'] = 'popular@example.com'
        r = self.client.post('/join', self.form_data)
        self.assert_join_validation_error(r, 'already associated')

    def test_join_with_deleted_email(self):
        # First confirm the email is taken, then delete that actor and
        # verify the email becomes available for signup again.
        self.form_data['email'] = 'popular@example.com'
        r = self.client.post('/join', self.form_data)
        self.assert_join_validation_error(r, 'already associated')
        api.actor_remove(api.ROOT, 'popular@example.com')
        self.form_data['email'] = 'popular@example.com'
        r = self.client.post('/join', self.form_data)
        r = self.assertRedirectsPrefix(r, '/welcome')

    def test_join_with_invalid_nick(self):
        # Too short to be a valid nick.
        self.form_data['nick'] = 'a'
        r = self.client.post('/join', self.form_data)
        self.assert_join_validation_error(r, 'Invalid nick')

    def test_join_with_reserved_nick(self):
        # NOTE(review): identical body to test_join_with_used_nick; a truly
        # *reserved* (never-assigned) nick was probably intended here.
        self.form_data['nick'] = 'popular'
        r = self.client.post('/join', self.form_data)
        self.assert_join_validation_error(r, 'already in use')

    def test_join_with_banned_nick(self):
        self.form_data['nick'] = 'json'
        r = self.client.post('/join', self.form_data)
        self.assert_join_validation_error(r, 'not allowed')

    def test_join_with_used_nick(self):
        self.form_data['nick'] = 'popular'
        r = self.client.post('/join', self.form_data)
        self.assert_join_validation_error(r, 'already in use')

    def test_join_with_used_nick_case_insensitive(self):
        # Nick uniqueness must ignore case.
        self.form_data['nick'] = 'Popular'
        r = self.client.post('/join', self.form_data)
        self.assert_join_validation_error(r, 'already in use')
class WelcomeTest(ViewTestCase):
    """
    Tests for the post-signup welcome flow pages (/welcome/*).
    """

    def setUp(self):
        super(WelcomeTest, self).setUp()
        self.login('girlfriend')

    def tearDown(self):
        self.logout()

    def test_photo_view(self):
        """
        The first welcome step renders the photo upload page.
        """
        r = self.client.get('/welcome/1')
        self.assertContains(r, 'Your photo')
        self.assertTemplateUsed(r, 'welcome_photo.html')

    def test_photo_upload(self):
        """
        Posting an image to the first welcome step replaces the user's avatar
        and redirects back with a confirmation flash message.
        """
        nick = 'popular'
        nick = clean.nick(nick)
        old_avatar = api.actor_get(api.ROOT, nick).extra.get('icon',
                                                             'avatar_default')
        self.login(nick)
        # Use a context manager so the fixture file is closed even if the
        # request fails (the previous version leaked the file handle).
        with open('testdata/test_avatar.jpg') as f:
            r = self.client.post('/welcome/1',
                                 {
                                     'imgfile': f,
                                     '_nonce' :
                                        util.create_nonce('popular',
                                                          'change_photo'),
                                 })
        r = self.assertRedirectsPrefix(r, '/welcome/1?')
        new_avatar = api.actor_get(api.ROOT, nick).extra.get('icon',
                                                             'avatar_default')
        self.assertNotEquals(old_avatar, new_avatar)
        self.assertContains(r, 'Avatar uploaded')
        self.assertTemplateUsed(r, 'welcome_photo.html')
        self.assertTemplateUsed(r, 'flash.html')

    def test_mobile_activation_view(self):
        """
        The second welcome step renders the mobile activation page.
        """
        r = self.client.get('/welcome/2')
        self.assertContains(r, 'SIGN IN')
        self.assertTemplateUsed(r, 'welcome_mobile.html')

    def test_contacts_view(self):
        """
        The third welcome step renders the contacts page.
        """
        r = self.client.get('/welcome/3')
        self.assertContains(r, 'Find some friends')
        self.assertTemplateUsed(r, 'welcome_contacts.html')

    def test_done_view(self):
        """
        The final welcome step renders the congratulations page.
        """
        r = self.client.get('/welcome/done')
        self.assertContains(r, 'Congratulations!')
        self.assertTemplateUsed(r, 'welcome_done.html')
| {
"content_hash": "56e9176efef6fba16ea3863f83edfc05",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 75,
"avg_line_length": 34.13768115942029,
"alnum_prop": 0.616005094459775,
"repo_name": "tallstreet/jaikuenginepatch",
"id": "0f72835234b34c50f3b424045ee474117f7627e9",
"size": "4711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "join/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "115039"
},
{
"name": "Python",
"bytes": "1011754"
},
{
"name": "R",
"bytes": "1277"
},
{
"name": "Shell",
"bytes": "5208"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
import argparse
import json
import logging
import os
import signal
import socket
import sys
import threading
import time
import urllib2
import uuid
from collections import defaultdict
from multiprocessing import Process, Event
repo_root = os.path.abspath(os.path.split(__file__)[0])
sys.path.insert(1, os.path.join(repo_root, "tools", "wptserve"))
from wptserve import server as wptserve, handlers
from wptserve.router import any_method
sys.path.insert(1, os.path.join(repo_root, "tools", "pywebsocket", "src"))
from mod_pywebsocket import standalone as pywebsocket
# Routing table, checked in order (first match wins): the runner UI under
# /tools/runner is served and its manifest updater may run as a script;
# everything else under /tools (and serve.py itself) is blocked; *.py files
# run as wptserve python scripts; *.asis files are sent verbatim; any
# remaining GET is served from disk.
routes = [("GET", "/tools/runner/*", handlers.file_handler),
          ("POST", "/tools/runner/update_manifest.py", handlers.python_script_handler),
          (any_method, "/tools/*", handlers.ErrorHandler(404)),
          (any_method, "/serve.py", handlers.ErrorHandler(404)),
          (any_method, "*.py", handlers.python_script_handler),
          ("GET", "*.asis", handlers.as_is_handler),
          ("GET", "*", handlers.file_handler),
          ]

# Legacy path alias kept for tests that still request WebIDLParser.js.
rewrites = [("GET", "/resources/WebIDLParser.js", "/resources/webidl2/lib/webidl2.js")]

# Subdomains tests expect to resolve to the server host; the non-ASCII
# entries exercise IDNA/punycode handling.
subdomains = [u"www",
              u"www1",
              u"www2",
              u"天気の良い日",
              u"élève"]

# Module-level logger; initialised by main() via default_logger().
logger = None
def default_logger(level):
    """Configure root logging at *level* (e.g. "debug") and return the
    "web-platform-tests" logger."""
    logging.basicConfig(level=getattr(logging, level.upper()))
    return logging.getLogger("web-platform-tests")
def open_socket(port):
    """Return a TCP socket listening on 127.0.0.1:*port* (0 = pick a free port)."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if port:
        # Reuse the address only for explicitly chosen ports; an ephemeral
        # port (0) is fresh by definition.
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(('127.0.0.1', port))
    sock.listen(5)
    return sock
def get_port():
    """Ask the OS for a currently-free port number and release it again."""
    probe = open_socket(0)
    port = probe.getsockname()[1]
    logger.debug("Going to use port %s" % port)
    probe.close()
    return port
class ServerProc(object):
    """Runs one server daemon in a child process.

    The child builds its daemon via ``init_func`` and then parks on the
    shared ``stop`` event; the parent sets the event to shut it down.
    """

    def __init__(self):
        self.proc = None
        self.daemon = None
        self.stop = Event()

    def start(self, init_func, config, paths, port, bind_hostname):
        """Spawn a daemonised child process running create_daemon()."""
        self.proc = Process(target=self.create_daemon,
                            args=(init_func, config, paths, port, bind_hostname))
        self.proc.daemon = True
        self.proc.start()

    def create_daemon(self, init_func, config, paths, port, bind_hostname):
        # Executes in the child process.
        try:
            self.daemon = init_func(config, paths, port, bind_hostname)
        except socket.error:
            print >> sys.stderr, "Socket error on port %s" % port
            raise

        # init_func may return None (unimplemented schemes); only serve
        # when a daemon was actually created.
        if self.daemon:
            self.daemon.start(block=False)
            try:
                # Block until the parent signals shutdown.
                self.stop.wait()
            except KeyboardInterrupt:
                pass

    def wait(self):
        """Request shutdown and block until the child exits."""
        self.stop.set()
        self.proc.join()

    def kill(self):
        """Forcibly terminate the child process."""
        self.stop.set()
        self.proc.terminate()
        self.proc.join()

    def is_alive(self):
        return self.proc.is_alive()
def check_subdomains(config, paths, subdomains, bind_hostname):
    """Probe every configured subdomain over HTTP; exit(1) if any fail.

    Starts a throwaway HTTP server on a free port, waits up to ~10s for it
    to accept connections on the base host, then fetches one URL per
    subdomain.  Failures are treated as host configuration problems
    (e.g. missing /etc/hosts entries).
    """
    port = get_port()
    server = ServerProc()
    server.start(start_http_server, config, paths, port, bind_hostname)

    connected = False
    for _ in range(10):
        try:
            urllib2.urlopen("http://%s:%d/" % (config["host"], port))
            connected = True
            break
        except urllib2.URLError:
            time.sleep(1)

    if not connected:
        logger.critical("Failed to connect to test server on http://%s:%s You may need to edit /etc/hosts or similar" % (config["host"], port))
        sys.exit(1)

    for subdomain, (punycode, host) in subdomains.iteritems():
        domain = "%s.%s" % (punycode, host)
        try:
            urllib2.urlopen("http://%s:%d/" % (domain, port))
        except Exception:
            logger.critical("Failed probing domain %s. You may need to edit /etc/hosts or similar." % domain)
            sys.exit(1)

    server.wait()
def get_subdomains(config):
    """Map each known subdomain to its (punycode, host) pair.

    Assumes the TLD is ASCII-only or already in punycode.
    """
    host = config["host"]
    return dict((name, (name.encode("idna"), host)) for name in subdomains)
def start_servers(config, paths, ports, bind_hostname):
    """Spawn one ServerProc per configured (scheme, port).

    Returns a dict mapping scheme -> list of (port, ServerProc) pairs.
    """
    servers = defaultdict(list)
    # Fix: the loop variable used to be named `ports`, shadowing the
    # parameter it was iterating over — it only worked because iteritems()
    # captured the dict before the rebinding.  Also drops an unused `host`
    # local.
    for scheme, scheme_ports in ports.iteritems():
        # http needs two ports (e.g. cross-origin tests); other schemes one.
        assert len(scheme_ports) == {"http": 2}.get(scheme, 1)
        for port in scheme_ports:
            init_func = {"http": start_http_server,
                         "https": start_https_server,
                         "ws": start_ws_server,
                         "wss": start_wss_server}[scheme]

            server_proc = ServerProc()
            server_proc.start(init_func, config, paths, port, bind_hostname)
            servers[scheme].append((port, server_proc))

    return servers
def start_http_server(config, paths, port, bind_hostname):
    # Plain-HTTP wptserve instance serving doc_root with the module-level
    # routes/rewrites tables; TLS is explicitly disabled.
    return wptserve.WebTestHttpd(host=config["host"],
                                 port=port,
                                 doc_root=paths["doc_root"],
                                 routes=routes,
                                 rewrites=rewrites,
                                 bind_hostname=bind_hostname,
                                 config=config,
                                 use_ssl=False,
                                 certificate=None)
def start_https_server(config, paths, port, bind_hostname):
    # HTTPS is not implemented in this copy of serve.py.  Returning None is
    # safe: ServerProc.create_daemon only serves when a daemon was created.
    return
class WebSocketDaemon(object):
    """Drives a mod_pywebsocket standalone server on a background thread."""

    def __init__(self, host, port, doc_root, handlers_root, log_level, bind_hostname):
        self.host = host
        # pywebsocket's standalone module does its own CLI parsing, so all
        # configuration is passed through a synthesized argv.
        cmd_args = ["-p", port,
                    "-d", doc_root,
                    "-w", handlers_root,
                    "--log-level", log_level]
        if (bind_hostname):
            # Pin the listening host only when explicit binding is requested.
            cmd_args = ["-H", host] + cmd_args

        opts, args = pywebsocket._parse_args_and_config(cmd_args)
        opts.cgi_directories = []
        opts.is_executable_method = None
        self.server = pywebsocket.WebSocketServer(opts)
        # The server may open several sockets; they must all share one port.
        ports = [item[0].getsockname()[1] for item in self.server._sockets]
        assert all(item == ports[0] for item in ports)
        self.port = ports[0]
        self.started = False
        self.server_thread = None

    def start(self, block=False):
        # Serve on the calling thread (block=True), or on a daemon thread so
        # the process can exit without an explicit stop().
        self.started = True

        if block:
            self.server.serve_forever()
        else:
            self.server_thread = threading.Thread(target=self.server.serve_forever)
            self.server_thread.setDaemon(True)  # don't hang on exit
            self.server_thread.start()

    def stop(self):
        """
        Stops the server.

        If the server is not running, this method has no effect.
        """
        if self.started:
            try:
                self.server.shutdown()
                self.server.server_close()
                self.server_thread.join()
                self.server_thread = None
            except AttributeError:
                # Already torn down (e.g. stop() raced start()); ignore.
                pass
            self.started = False
        self.server = None
def start_ws_server(config, paths, port, bind_hostname):
    """Create (but do not start) a plain-WebSocket daemon for *port*."""
    ws_doc_root = paths["ws_doc_root"]
    return WebSocketDaemon(config["host"], str(port), repo_root, ws_doc_root,
                           "debug", bind_hostname)
def start_wss_server(config, paths, port, bind_hostname):
    # Secure WebSockets are not implemented in this copy of serve.py.
    # Returning None is safe: ServerProc.create_daemon skips a None daemon.
    return
def get_ports(config):
    """Resolve the configured port lists, replacing "auto" with free ports.

    Returns a dict mapping scheme -> list of concrete port numbers.
    """
    rv = defaultdict(list)
    # .items() instead of .iteritems(): identical behavior on Python 2 and
    # also runs on Python 3.  The redundant `else: port = port` is dropped.
    for scheme, ports in config["ports"].items():
        for port in ports:
            if port == "auto":
                port = get_port()
            rv[scheme].append(port)
    return rv
def normalise_config(config, domains, ports):
    """Flatten parsed settings into the config dict handed to each server.

    *domains* maps subdomain -> (punycode, host) tuples, which are joined
    with "." into full domain strings; the empty-string key denotes the
    bare host with no subdomain prefix.
    """
    # .items() instead of .iteritems() for Python 2/3 compatibility; the
    # manual ports-copy loop is replaced by an equivalent dict() copy.
    domains_ = {}
    for key, value in domains.items():
        domains_[key] = ".".join(value)
    domains_[""] = config["host"]

    return {"host": config["host"],
            "domains": domains_,
            "ports": dict(ports)}
def start(config):
    # Top-level startup: resolve concrete ports and subdomain mappings,
    # optionally verify the host machine can resolve every subdomain, then
    # launch one server process per (scheme, port).  Returns the normalised
    # config alongside the running servers.
    ports = get_ports(config)
    domains = get_subdomains(config)
    bind_hostname = config["bind_hostname"]

    paths = {"doc_root": config["doc_root"],
             "ws_doc_root": config["ws_doc_root"]}

    if config["check_subdomains"]:
        # Fail fast if DNS/hosts setup can't reach the subdomains.
        check_subdomains(config, paths, domains, bind_hostname)

    config_ = normalise_config(config, domains, ports)
    servers = start_servers(config_, paths, ports, bind_hostname)

    return config_, servers
def iter_procs(servers):
    """Yield the child process of every server, across all schemes.

    *servers* maps scheme -> list of (port, ServerProc) pairs, as returned
    by start_servers().
    """
    # Fix: the outer loop variable used to be named `servers`, rebinding
    # the parameter inside its own iteration.
    for scheme_servers in servers.values():
        for port, server in scheme_servers:
            yield server.proc
def value_set(config, key):
    """Return True if *key* is present in *config* with a non-None value."""
    return config.get(key) is not None
def set_computed_defaults(config):
    """Fill in doc_root / ws_doc_root when the config leaves them unset.

    ws_doc_root defaults to <doc_root>/websockets/handlers, falling back to
    the repository root when doc_root is itself unset; doc_root defaults to
    the repository root.
    """
    if not value_set(config, "ws_doc_root"):
        if value_set(config, "doc_root"):
            root = config["doc_root"]
        else:
            root = repo_root
        # Bug fix: previously joined against repo_root, ignoring the
        # `root` computed above, so a custom doc_root never influenced
        # ws_doc_root (matches upstream web-platform-tests behavior).
        config["ws_doc_root"] = os.path.join(root, "websockets", "handlers")

    if not value_set(config, "doc_root"):
        config["doc_root"] = repo_root
def merge_json(base_obj, override_obj):
    """Recursively overlay *override_obj* onto *base_obj*.

    Only keys that exist in *base_obj* appear in the result; override-only
    keys are ignored.  Nested dicts are merged recursively; any other value
    in *override_obj* replaces the base value wholesale.
    """
    # .items() instead of .iteritems(): identical on Python 2, also runs on
    # Python 3; nested if/else flattened to if/elif/else.
    rv = {}
    for key, value in base_obj.items():
        if key not in override_obj:
            rv[key] = value
        elif isinstance(value, dict):
            rv[key] = merge_json(value, override_obj[key])
        else:
            rv[key] = override_obj[key]
    return rv
def load_config(default_path, override_path=None):
    """Load JSON config from *default_path*, overlaying *override_path*.

    Raises ValueError when the default config is missing.  A missing — or
    unspecified — override file is treated as empty.
    """
    if os.path.exists(default_path):
        with open(default_path) as f:
            base_obj = json.load(f)
    else:
        raise ValueError("Config path %s does not exist" % default_path)

    # Bug fix: os.path.exists(None) raises TypeError, so the declared
    # default override_path=None used to crash; guard against None first.
    if override_path is not None and os.path.exists(override_path):
        with open(override_path) as f:
            override_obj = json.load(f)
    else:
        override_obj = {}

    rv = merge_json(base_obj, override_obj)
    set_computed_defaults(rv)
    return rv
def main():
    """Entry point: load config, set up logging, serve until interrupted."""
    global logger

    config = load_config("config.default.json",
                         "config.json")
    logger = default_logger(config["log_level"])

    config_, servers = start(config)

    try:
        # Join with a short timeout so KeyboardInterrupt is delivered
        # promptly while any child process is still running.
        while any(proc.is_alive() for proc in iter_procs(servers)):
            for proc in iter_procs(servers):
                proc.join(1)
    except KeyboardInterrupt:
        logger.info("Shutting down")


if __name__ == "__main__":
    main()
| {
"content_hash": "9ad8a683865015da948f94cdd5336e0c",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 143,
"avg_line_length": 30.78635014836795,
"alnum_prop": 0.5702168674698795,
"repo_name": "smilusingjavascript/blink",
"id": "35a7c7262d5d25511fba6976fedbe8c7a2cfd7b2",
"size": "10389",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "LayoutTests/imported/web-platform-tests/serve.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "14584"
},
{
"name": "C",
"bytes": "1445607"
},
{
"name": "C++",
"bytes": "40292420"
},
{
"name": "CSS",
"bytes": "536635"
},
{
"name": "Java",
"bytes": "74957"
},
{
"name": "JavaScript",
"bytes": "26642040"
},
{
"name": "Objective-C",
"bytes": "35696"
},
{
"name": "Objective-C++",
"bytes": "341384"
},
{
"name": "PHP",
"bytes": "168951"
},
{
"name": "Perl",
"bytes": "583826"
},
{
"name": "Python",
"bytes": "3802000"
},
{
"name": "Ruby",
"bytes": "141818"
},
{
"name": "Shell",
"bytes": "8923"
},
{
"name": "XSLT",
"bytes": "49099"
}
],
"symlink_target": ""
} |
import putio
import os.path
from putiox import PutioEx
from file import FileEx
from subtitles import SubtitleEx
from sqlite import SQLiteEx
import logging
from utils import mkdir_p
# Module-level logger for orchestration/download progress messages.
logger = logging.getLogger(__name__)
class Orchestra(object):
    """Coordinates put.io downloads: lists the remote Movies/Series folders,
    records files in a local SQLite DB, then downloads subtitles and MP4s
    for items whose conversion has completed."""

    def __init__(self, apiToken, moviesDir, seriesDir):
        handle = putio.Client(apiToken)
        self.pclient = PutioEx(handle)
        self.psubtitles = SubtitleEx(handle)
        self.db = SQLiteEx('orchestra.db', self.pclient)
        self.moviesDir = moviesDir
        self.seriesDir = seriesDir
        # Ensure local target directories exist and resolve the ids of the
        # remote "Movies"/"Series" folders.
        if not os.path.exists(self.moviesDir):
            os.mkdir(self.moviesDir)
        self.movieId = self.pclient.getDirectory("Movies")
        if not os.path.exists(self.seriesDir):
            os.mkdir(self.seriesDir)
        self.seriesId = self.pclient.getDirectory("Series")

    def startup(self):
        """Scan both remote folders, then download everything not yet done."""
        self.pclient.parseDirectories(self.movieId, self.listMovie)
        self.pclient.parseDirectories(self.seriesId, self.listSeries)
        self.db.listNonCompletedFiles(self.downloadMovieData, FileEx.MOVIES)
        self.db.listNonCompletedFiles(self.downloadSeriesData, FileEx.SERIES)

    def listMovie(self, file):
        # Per-file callback: request MP4 conversion, then track the file.
        if self.pclient.prepareMp4(file):
            self.db.storeFileInfo(file, FileEx.MOVIES)

    def listSeries(self, file):
        # Per-file callback: request MP4 conversion, then track the file.
        if self.pclient.prepareMp4(file):
            self.db.storeFileInfo(file, FileEx.SERIES)

    def downloadMovieData(self, file):
        # The subtitle lookup doubles as metadata lookup (name/year); files
        # with no subtitle match are skipped entirely.
        subtitle = self.psubtitles.getSubtitles(file)
        if subtitle is not None:
            movieName = subtitle.movieName + " (" + subtitle.movieYear + ")";
            logger.info(movieName)
            # "/" in a title would create unintended subdirectories.
            self.downloadMovie(file, movieName.replace("/", "_"), subtitle)

    def downloadSeriesData(self, file):
        subtitle = self.psubtitles.getSubtitles(file)
        if subtitle is not None:
            # e.g. "ShowName.s01e02"
            serieEpisode = subtitle.serieName + ".s%02de%02d" % ( subtitle.serieSeason , subtitle.serieEpisode)
            logger.info(serieEpisode)
            self.downloadSerie(file, subtitle.serieName, subtitle.serieSeason, serieEpisode.replace("/", "_"), subtitle)

    def downloadMovie(self, file, fileName, subtitle):
        def __downloadMovieMP4():
            # Runs inside the DB transaction: fetch the .srt, then the .mp4.
            self.psubtitles.downloadSubtitles(subtitle, os.path.join(self.moviesDir, fileName) + ".srt")
            self.pclient.downloadMP4(file, os.path.join(self.moviesDir, fileName) + ".mp4")
        if self.pclient.isMP4Complete(file):
            # Download and mark as done atomically with the DB update.
            self.db.runTransact(__downloadMovieMP4, "update files set downloaded = 1, moviedb_name = ? where id = ? ", (fileName, file.id))

    def downloadSerie(self, file, serieName, serieSeason, fileName, subtitle):
        def __downloadSerieMP4():
            # Runs inside the DB transaction: create the season directory
            # if needed, then fetch the .srt and the .mp4.
            episodeDir = os.path.join(self.seriesDir, serieName, "season%d" % serieSeason)
            if not os.path.exists(episodeDir):
                mkdir_p(episodeDir)
            self.psubtitles.downloadSubtitles(subtitle, os.path.join(episodeDir, fileName) + ".srt")
            self.pclient.downloadMP4(file, os.path.join(episodeDir, fileName) + ".mp4")
        if self.pclient.isMP4Complete(file):
            self.db.runTransact(__downloadSerieMP4, "update files set downloaded = 1, moviedb_name = ? where id = ? ", (fileName, file.id))
| {
"content_hash": "5e1dc2d175f6eef94854ab71949e62a6",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 130,
"avg_line_length": 35.566265060240966,
"alnum_prop": 0.7388211382113821,
"repo_name": "Alkpone/Orchestra",
"id": "e4e5bcb3dc427f7bff62465a5fddbccc67559942",
"size": "2952",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/orchestra/orchestra.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "24271"
}
],
"symlink_target": ""
} |
import numpy
import numpy as np
from . import _ni_support
from . import _ni_label
from . import _nd_image
from . import _morphology
# Public API of this module.  Note `sum` intentionally shadows the builtin
# for backwards compatibility; prefer `sum_labels` in new code.
__all__ = ['label', 'find_objects', 'labeled_comprehension', 'sum', 'mean',
           'variance', 'standard_deviation', 'minimum', 'maximum', 'median',
           'minimum_position', 'maximum_position', 'extrema', 'center_of_mass',
           'histogram', 'watershed_ift', 'sum_labels']
def label(input, structure=None, output=None):
    """
    Label features in an array.

    Parameters
    ----------
    input : array_like
        An array-like object to be labeled. Any non-zero values in `input` are
        counted as features and zero values are considered the background.
    structure : array_like, optional
        A structuring element that defines feature connections.
        `structure` must be centrosymmetric
        (see Notes).
        If no structuring element is provided,
        one is automatically generated with a squared connectivity equal to
        one.  That is, for a 2-D `input` array, the default structuring element
        is::

            [[0,1,0],
             [1,1,1],
             [0,1,0]]

    output : (None, data-type, array_like), optional
        If `output` is a data type, it specifies the type of the resulting
        labeled feature array.
        If `output` is an array-like object, then `output` will be updated
        with the labeled features from this function.  This function can
        operate in-place, by passing output=input.
        Note that the output must be able to store the largest label, or this
        function will raise an Exception.

    Returns
    -------
    label : ndarray or int
        An integer ndarray where each unique feature in `input` has a unique
        label in the returned array.
    num_features : int
        How many objects were found.

        If `output` is None, this function returns a tuple of
        (`labeled_array`, `num_features`).

        If `output` is a ndarray, then it will be updated with values in
        `labeled_array` and only `num_features` will be returned by this
        function.

    See Also
    --------
    find_objects : generate a list of slices for the labeled features (or
                   objects); useful for finding features' position or
                   dimensions

    Notes
    -----
    A centrosymmetric matrix is a matrix that is symmetric about the center.
    See [1]_ for more information.

    The `structure` matrix must be centrosymmetric to ensure
    two-way connections.
    For instance, if the `structure` matrix is not centrosymmetric
    and is defined as::

        [[0,1,0],
         [1,1,0],
         [0,0,0]]

    and the `input` is::

        [[1,2],
         [0,3]]

    then the structure matrix would indicate the
    entry 2 in the input is connected to 1,
    but 1 is not connected to 2.

    Examples
    --------
    Create an image with some features, then label it using the default
    (cross-shaped) structuring element:

    >>> from scipy.ndimage import label, generate_binary_structure
    >>> a = np.array([[0,0,1,1,0,0],
    ...               [0,0,0,1,0,0],
    ...               [1,1,0,0,1,0],
    ...               [0,0,0,1,0,0]])
    >>> labeled_array, num_features = label(a)

    Each of the 4 features are labeled with a different integer:

    >>> num_features
    4
    >>> labeled_array
    array([[0, 0, 1, 1, 0, 0],
           [0, 0, 0, 1, 0, 0],
           [2, 2, 0, 0, 3, 0],
           [0, 0, 0, 4, 0, 0]])

    Generate a structuring element that will consider features connected even
    if they touch diagonally:

    >>> s = generate_binary_structure(2,2)

    or,

    >>> s = [[1,1,1],
    ...      [1,1,1],
    ...      [1,1,1]]

    Label the image using the new structuring element:

    >>> labeled_array, num_features = label(a, structure=s)

    Show the 2 labeled features (note that features 1, 3, and 4 from above are
    now considered a single feature):

    >>> num_features
    2
    >>> labeled_array
    array([[0, 0, 1, 1, 0, 0],
           [0, 0, 0, 1, 0, 0],
           [2, 2, 0, 0, 1, 0],
           [0, 0, 0, 1, 0, 0]])

    References
    ----------
    .. [1] James R. Weaver, "Centrosymmetric (cross-symmetric)
           matrices, their basic properties, eigenvalues, and
           eigenvectors." The American Mathematical Monthly 92.10
           (1985): 711-717.

    """
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    if structure is None:
        structure = _morphology.generate_binary_structure(input.ndim, 1)
    structure = numpy.asarray(structure, dtype=bool)
    if structure.ndim != input.ndim:
        raise RuntimeError('structure and input must have equal rank')
    # The C labeling kernel only supports 3-wide structuring elements.
    for ii in structure.shape:
        if ii != 3:
            raise ValueError('structure dimensions must be equal to 3')

    # Use 32 bits if it's large enough for this image.
    # _ni_label.label() needs two entries for background and
    # foreground tracking
    need_64bits = input.size >= (2**31 - 2)

    if isinstance(output, numpy.ndarray):
        if output.shape != input.shape:
            raise ValueError("output shape not correct")
        caller_provided_output = True
    else:
        caller_provided_output = False
        if output is None:
            output = np.empty(input.shape, np.intp if need_64bits else np.int32)
        else:
            # `output` was given as a dtype; allocate an array of that type.
            output = np.empty(input.shape, output)

    # handle scalars, 0-D arrays
    if input.ndim == 0 or input.size == 0:
        if input.ndim == 0:
            # scalar
            maxlabel = 1 if (input != 0) else 0
            output[...] = maxlabel
        else:
            # 0-D
            maxlabel = 0
        if caller_provided_output:
            return maxlabel
        else:
            return output, maxlabel

    try:
        max_label = _ni_label._label(input, structure, output)
    except _ni_label.NeedMoreBits as e:
        # Make another attempt with enough bits, then try to cast to the
        # new type.
        tmp_output = np.empty(input.shape, np.intp if need_64bits else np.int32)
        max_label = _ni_label._label(input, structure, tmp_output)
        output[...] = tmp_output[...]
        if not np.all(output == tmp_output):
            # refuse to return bad results
            raise RuntimeError(
                "insufficient bit-depth in requested output type"
            ) from e

    if caller_provided_output:
        # result was written in-place
        return max_label
    else:
        return output, max_label
def find_objects(input, max_label=0):
    """
    Find objects in a labeled array.

    Parameters
    ----------
    input : ndarray of ints
        Array containing objects defined by different labels. Labels with
        value 0 are ignored.
    max_label : int, optional
        Maximum label to be searched for in `input`. If max_label is not
        given, the positions of all objects are returned.

    Returns
    -------
    object_slices : list of tuples
        A list of tuples, with each tuple containing N slices (with N the
        dimension of the input array). Slices correspond to the minimal
        parallelepiped that contains the object. If a number is missing,
        None is returned instead of a slice.

    See Also
    --------
    label, center_of_mass

    Notes
    -----
    This function is very useful for isolating a volume of interest inside
    a 3-D array, that cannot be "seen through".

    Examples
    --------
    >>> from scipy import ndimage
    >>> a = np.zeros((6,6), dtype=int)
    >>> a[2:4, 2:4] = 1
    >>> a[4, 4] = 1
    >>> a[:2, :3] = 2
    >>> a[0, 5] = 3
    >>> a
    array([[2, 2, 2, 0, 0, 3],
           [2, 2, 2, 0, 0, 0],
           [0, 0, 1, 1, 0, 0],
           [0, 0, 1, 1, 0, 0],
           [0, 0, 0, 0, 1, 0],
           [0, 0, 0, 0, 0, 0]])
    >>> ndimage.find_objects(a)
    [(slice(2, 5, None), slice(2, 5, None)), (slice(0, 2, None), slice(0, 3, None)), (slice(0, 1, None), slice(5, 6, None))]
    >>> ndimage.find_objects(a, max_label=2)
    [(slice(2, 5, None), slice(2, 5, None)), (slice(0, 2, None), slice(0, 3, None))]
    >>> ndimage.find_objects(a == 1, max_label=2)
    [(slice(2, 5, None), slice(2, 5, None)), None]

    >>> loc = ndimage.find_objects(a)[0]
    >>> a[loc]
    array([[1, 1, 0],
           [1, 1, 0],
           [0, 0, 1]])

    """
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')

    # max_label <= 0 (the default) means "search every label present".
    if max_label < 1:
        max_label = input.max()

    return _nd_image.find_objects(input, max_label)
def labeled_comprehension(input, labels, index, func, out_dtype, default, pass_positions=False):
    """
    Roughly equivalent to [func(input[labels == i]) for i in index].

    Sequentially applies an arbitrary function (that works on array_like input)
    to subsets of an N-D image array specified by `labels` and `index`.
    The option exists to provide the function with positional parameters as the
    second argument.

    Parameters
    ----------
    input : array_like
        Data from which to select `labels` to process.
    labels : array_like or None
        Labels to objects in `input`.
        If not None, array must be same shape as `input`.
        If None, `func` is applied to raveled `input`.
    index : int, sequence of ints or None
        Subset of `labels` to which to apply `func`.
        If a scalar, a single value is returned.
        If None, `func` is applied to all non-zero values of `labels`.
    func : callable
        Python function to apply to `labels` from `input`.
    out_dtype : dtype
        Dtype to use for `result`.
    default : int, float or None
        Default return value when a element of `index` does not exist
        in `labels`.
    pass_positions : bool, optional
        If True, pass linear indices to `func` as a second argument.
        Default is False.

    Returns
    -------
    result : ndarray
        Result of applying `func` to each of `labels` to `input` in `index`.

    Examples
    --------
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> from scipy import ndimage
    >>> lbl, nlbl = ndimage.label(a)
    >>> lbls = np.arange(1, nlbl+1)
    >>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, 0)
    array([ 2.75,  5.5 ,  6.  ])

    Falling back to `default`:

    >>> lbls = np.arange(1, nlbl+2)
    >>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, -1)
    array([ 2.75,  5.5 ,  6.  , -1.  ])

    Passing positions:

    >>> def fn(val, pos):
    ...     print("fn says: %s : %s" % (val, pos))
    ...     return (val.sum()) if (pos.sum() % 2 == 0) else (-val.sum())
    ...
    >>> ndimage.labeled_comprehension(a, lbl, lbls, fn, float, 0, True)
    fn says: [1 2 5 3] : [0 1 4 5]
    fn says: [4 7] : [ 7 11]
    fn says: [9 3] : [12 13]
    array([ 11.,  11., -12.,   0.])

    """
    as_scalar = numpy.isscalar(index)
    input = numpy.asarray(input)

    if pass_positions:
        # Linear indices into the (unmasked) input, same shape as input.
        positions = numpy.arange(input.size).reshape(input.shape)

    if labels is None:
        if index is not None:
            raise ValueError("index without defined labels")
        if not pass_positions:
            return func(input.ravel())
        else:
            return func(input.ravel(), positions.ravel())

    try:
        input, labels = numpy.broadcast_arrays(input, labels)
    except ValueError as e:
        raise ValueError("input and labels must have the same shape "
                         "(excepting dimensions with width 1)") from e

    if index is None:
        if not pass_positions:
            return func(input[labels > 0])
        else:
            return func(input[labels > 0], positions[labels > 0])

    index = numpy.atleast_1d(index)
    # Reject indices that cannot round-trip through the labels' dtype.
    if np.any(index.astype(labels.dtype).astype(index.dtype) != index):
        raise ValueError("Cannot convert index values from <%s> to <%s> "
                         "(labels' type) without loss of precision" %
                         (index.dtype, labels.dtype))

    index = index.astype(labels.dtype)

    # optimization: find min/max in index, and select those parts of labels, input, and positions
    lo = index.min()
    hi = index.max()
    mask = (labels >= lo) & (labels <= hi)

    # this also ravels the arrays
    labels = labels[mask]
    input = input[mask]
    if pass_positions:
        positions = positions[mask]

    # sort everything by labels
    label_order = labels.argsort()
    labels = labels[label_order]
    input = input[label_order]
    if pass_positions:
        positions = positions[label_order]

    index_order = index.argsort()
    sorted_index = index[index_order]

    def do_map(inputs, output):
        """labels must be sorted"""
        nidx = sorted_index.size

        # Find boundaries for each stretch of constant labels
        # This could be faster, but we already paid N log N to sort labels.
        lo = numpy.searchsorted(labels, sorted_index, side='left')
        hi = numpy.searchsorted(labels, sorted_index, side='right')

        for i, l, h in zip(range(nidx), lo, hi):
            if l == h:
                # index value absent from labels; leave the default in place.
                continue
            output[i] = func(*[inp[l:h] for inp in inputs])

    temp = numpy.empty(index.shape, out_dtype)
    temp[:] = default
    if not pass_positions:
        do_map([input], temp)
    else:
        do_map([input, positions], temp)

    # Undo the index sort so results line up with the caller's `index` order.
    output = numpy.zeros(index.shape, out_dtype)
    output[index_order] = temp
    if as_scalar:
        output = output[0]

    return output
def _safely_castable_to_int(dt):
"""Test whether the NumPy data type `dt` can be safely cast to an int."""
int_size = np.dtype(int).itemsize
safe = ((np.issubdtype(dt, np.signedinteger) and dt.itemsize <= int_size) or
(np.issubdtype(dt, np.unsignedinteger) and dt.itemsize < int_size))
return safe
def _stats(input, labels=None, index=None, centered=False):
    """Count, sum, and optionally compute (sum - centre)^2 of input by label

    Parameters
    ----------
    input : array_like, N-D
        The input data to be analyzed.
    labels : array_like (N-D), optional
        The labels of the data in `input`. This array must be broadcast
        compatible with `input`; typically, it is the same shape as `input`.
        If `labels` is None, all nonzero values in `input` are treated as
        the single labeled group.
    index : label or sequence of labels, optional
        These are the labels of the groups for which the stats are computed.
        If `index` is None, the stats are computed for the single group where
        `labels` is greater than 0.
    centered : bool, optional
        If True, the centered sum of squares for each labeled group is
        also returned. Default is False.

    Returns
    -------
    counts : int or ndarray of ints
        The number of elements in each labeled group.
    sums : scalar or ndarray of scalars
        The sums of the values in each labeled group.
    sums_c : scalar or ndarray of scalars, optional
        The sums of mean-centered squares of the values in each labeled group.
        This is only returned if `centered` is True.

    """
    def single_group(vals):
        # Stats for one flat group of values (no per-label bookkeeping).
        if centered:
            vals_c = vals - vals.mean()
            return vals.size, vals.sum(), (vals_c * vals_c.conjugate()).sum()
        else:
            return vals.size, vals.sum()

    if labels is None:
        return single_group(input)

    # ensure input and labels match sizes
    input, labels = numpy.broadcast_arrays(input, labels)

    if index is None:
        return single_group(input[labels > 0])

    if numpy.isscalar(index):
        return single_group(input[labels == index])

    def _sum_centered(labels):
        # `labels` is expected to be an ndarray with the same shape as `input`.
        # It must contain the label indices (which are not necessarily the labels
        # themselves).
        means = sums / counts
        centered_input = input - means[labels]
        # bincount expects 1-D inputs, so we ravel the arguments.
        bc = numpy.bincount(labels.ravel(),
                            weights=(centered_input *
                                     centered_input.conjugate()).ravel())
        return bc

    # Remap labels to unique integers if necessary, or if the largest
    # label is larger than the number of values.
    if (not _safely_castable_to_int(labels.dtype) or
            labels.min() < 0 or labels.max() > labels.size):
        # Use numpy.unique to generate the label indices.  `new_labels` will
        # be 1-D, but it should be interpreted as the flattened N-D array of
        # label indices.
        unique_labels, new_labels = numpy.unique(labels, return_inverse=True)
        counts = numpy.bincount(new_labels)
        sums = numpy.bincount(new_labels, weights=input.ravel())
        if centered:
            # Compute the sum of the mean-centered squares.
            # We must reshape new_labels to the N-D shape of `input` before
            # passing it _sum_centered.
            sums_c = _sum_centered(new_labels.reshape(labels.shape))
        idxs = numpy.searchsorted(unique_labels, index)
        # make all of idxs valid
        idxs[idxs >= unique_labels.size] = 0
        found = (unique_labels[idxs] == index)
    else:
        # labels are an integer type allowed by bincount, and there aren't too
        # many, so call bincount directly.
        counts = numpy.bincount(labels.ravel())
        sums = numpy.bincount(labels.ravel(), weights=input.ravel())
        if centered:
            sums_c = _sum_centered(labels)
        # make sure all index values are valid
        idxs = numpy.asanyarray(index, numpy.int_).copy()
        found = (idxs >= 0) & (idxs < counts.size)
        idxs[~found] = 0

    # Select the requested labels; entries not found report zeros.
    counts = counts[idxs]
    counts[~found] = 0
    sums = sums[idxs]
    sums[~found] = 0

    if not centered:
        return (counts, sums)
    else:
        sums_c = sums_c[idxs]
        sums_c[~found] = 0
        return (counts, sums, sums_c)
def sum(input, labels=None, index=None):
    """
    Calculate the sum of the values of the array.

    Notes
    -----
    This is an alias for `ndimage.sum_labels` kept for backwards compatibility
    reasons, for new code please prefer `sum_labels`. See the `sum_labels`
    docstring for more details.

    """
    # Note: this intentionally shadows the builtin `sum` within this module's
    # public API (it is listed in __all__).
    return sum_labels(input, labels, index)
def sum_labels(input, labels=None, index=None):
    """
    Calculate the sum of the values of the array.

    Parameters
    ----------
    input : array_like
        Values of `input` inside the regions defined by `labels`
        are summed together.
    labels : array_like of ints, optional
        Assign labels to the values of the array. Has to have the same shape as
        `input`.
    index : array_like, optional
        A single label number or a sequence of label numbers of
        the objects to be measured.

    Returns
    -------
    sum : ndarray or scalar
        An array of the sums of values of `input` inside the regions defined
        by `labels` with the same shape as `index`. If 'index' is None or scalar,
        a scalar is returned.

    See also
    --------
    mean, median

    Examples
    --------
    >>> from scipy import ndimage
    >>> input =  [0,1,2,3]
    >>> labels = [1,1,2,2]
    >>> ndimage.sum(input, labels, index=[1,2])
    [1.0, 5.0]
    >>> ndimage.sum(input, labels, index=1)
    1
    >>> ndimage.sum(input, labels)
    6

    """
    # _stats computes counts and sums in one pass; counts are unused here.
    count, sum = _stats(input, labels, index)
    return sum
def mean(input, labels=None, index=None):
    """
    Calculate the mean of the values of an array at labels.

    Parameters
    ----------
    input : array_like
        Array on which to compute the mean of elements over distinct
        regions.
    labels : array_like, optional
        Array of labels of same shape, or broadcastable to the same shape as
        `input`. All elements sharing the same label form one region over
        which the mean of the elements is computed.
    index : int or sequence of ints, optional
        Labels of the objects over which the mean is to be computed.
        Default is None, in which case the mean for all values where label is
        greater than 0 is calculated.

    Returns
    -------
    out : list
        Sequence of same length as `index`, with the mean of the different
        regions labeled by the labels in `index`.

    See also
    --------
    variance, standard_deviation, minimum, maximum, sum, label

    Examples
    --------
    >>> from scipy import ndimage
    >>> a = np.arange(25).reshape((5,5))
    >>> labels = np.zeros_like(a)
    >>> labels[3:5,3:5] = 1
    >>> index = np.unique(labels)
    >>> labels
    array([[0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0],
           [0, 0, 0, 1, 1],
           [0, 0, 0, 1, 1]])
    >>> index
    array([0, 1])
    >>> ndimage.mean(a, labels=labels, index=index)
    [10.285714285714286, 21.0]

    """
    # Cast counts to float64 so the division yields a floating-point mean
    # even for integer inputs.
    count, sum = _stats(input, labels, index)
    return sum / numpy.asanyarray(count).astype(numpy.float64)
def variance(input, labels=None, index=None):
    """
    Calculate the variance of the values of an N-D image array, optionally at
    specified sub-regions.

    Parameters
    ----------
    input : array_like
        Nd-image data to process.
    labels : array_like, optional
        Labels defining sub-regions in `input`.
        If not None, must be same shape as `input`.
    index : int or sequence of ints, optional
        `labels` to include in output. If None (default), all values where
        `labels` is non-zero are used.

    Returns
    -------
    variance : float or ndarray
        Values of variance, for each sub-region if `labels` and `index` are
        specified.

    See Also
    --------
    label, standard_deviation, maximum, minimum, extrema

    Examples
    --------
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> from scipy import ndimage
    >>> ndimage.variance(a)
    7.609375

    Features to process can be specified using `labels` and `index`:

    >>> lbl, nlbl = ndimage.label(a)
    >>> ndimage.variance(a, lbl, index=np.arange(1, nlbl+1))
    array([ 2.1875,  2.25  ,  9.    ])

    If no index is given, all non-zero `labels` are processed:

    >>> ndimage.variance(a, lbl)
    6.1875

    """
    # `centered=True` makes _stats also return the per-region sum of
    # squared deviations from the region mean.
    count, total, sum_c_sq = _stats(input, labels, index, centered=True)
    # Use the module-wide ``numpy`` alias (as in ``mean``) so this function
    # does not depend on a separate ``np`` binding; ``astype(numpy.float64)``
    # is equivalent to the previous ``astype(float)``.
    return sum_c_sq / numpy.asanyarray(count).astype(numpy.float64)
def standard_deviation(input, labels=None, index=None):
    """
    Calculate the standard deviation of the values of an N-D image array,
    optionally at specified sub-regions.

    Parameters
    ----------
    input : array_like
        N-D image data to process.
    labels : array_like, optional
        Labels to identify sub-regions in `input`.
        If not None, must be same shape as `input`.
    index : int or sequence of ints, optional
        `labels` to include in output. If None (default), all values where
        `labels` is non-zero are used.

    Returns
    -------
    standard_deviation : float or ndarray
        Values of standard deviation, for each sub-region if `labels` and
        `index` are specified.

    See Also
    --------
    label, variance, maximum, minimum, extrema

    Examples
    --------
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> from scipy import ndimage
    >>> ndimage.standard_deviation(a)
    2.7585095613392387

    Features to process can be specified using `labels` and `index`:

    >>> lbl, nlbl = ndimage.label(a)
    >>> ndimage.standard_deviation(a, lbl, index=np.arange(1, nlbl+1))
    array([ 1.479,  1.5  , 3.   ])

    If no index is given, non-zero `labels` are processed:

    >>> ndimage.standard_deviation(a, lbl)
    2.4874685927665499

    """
    # Standard deviation is just the square root of the (per-region) variance.
    var = variance(input, labels, index)
    return numpy.sqrt(var)
def _select(input, labels=None, index=None, find_min=False, find_max=False,
            find_min_positions=False, find_max_positions=False,
            find_median=False):
    """Returns min, max, or both, plus their positions (if requested), and
    median.

    Results are appended to the returned list in the order of the
    ``find_*`` flags in the signature.  Positions are reported as flat
    (raveled) indices into `input`.
    """
    input = numpy.asanyarray(input)
    find_positions = find_min_positions or find_max_positions
    positions = None
    if find_positions:
        # Flat index of every element, shaped like the input, so that
        # boolean masking below yields the raveled position directly.
        positions = numpy.arange(input.size).reshape(input.shape)

    # Compute the requested statistics over a single 1-D group of values.
    def single_group(vals, positions):
        result = []
        if find_min:
            result += [vals.min()]
        if find_min_positions:
            result += [positions[vals == vals.min()][0]]
        if find_max:
            result += [vals.max()]
        if find_max_positions:
            result += [positions[vals == vals.max()][0]]
        if find_median:
            result += [numpy.median(vals)]
        return result

    # No labels: the whole array is one group.
    if labels is None:
        return single_group(input, positions)

    # ensure input and labels match sizes
    input, labels = numpy.broadcast_arrays(input, labels)

    # No index: one group formed by all positively-labeled elements.
    if index is None:
        mask = (labels > 0)
        masked_positions = None
        if find_positions:
            masked_positions = positions[mask]
        return single_group(input[mask], masked_positions)

    # Scalar index: one group formed by elements with exactly that label.
    if numpy.isscalar(index):
        mask = (labels == index)
        masked_positions = None
        if find_positions:
            masked_positions = positions[mask]
        return single_group(input[mask], masked_positions)

    # Sequence of indices: vectorized path.  `idxs` maps each requested
    # index to a compact label id usable for the scatter tricks below.
    # remap labels to unique integers if necessary, or if the largest
    # label is larger than the number of values.
    if (not _safely_castable_to_int(labels.dtype) or
            labels.min() < 0 or labels.max() > labels.size):
        # remap labels, and indexes
        unique_labels, labels = numpy.unique(labels, return_inverse=True)
        idxs = numpy.searchsorted(unique_labels, index)
        # make all of idxs valid
        idxs[idxs >= unique_labels.size] = 0
        found = (unique_labels[idxs] == index)
    else:
        # labels are an integer type, and there aren't too many
        idxs = numpy.asanyarray(index, numpy.int_).copy()
        found = (idxs >= 0) & (idxs <= labels.max())

    # Requested indices that match no label are redirected to a sentinel
    # slot (labels.max() + 1) that no real element writes to.
    idxs[~ found] = labels.max() + 1

    # Sort values (secondary) within labels (primary) when the median is
    # needed; otherwise a sort by value alone is sufficient.
    if find_median:
        order = numpy.lexsort((input.ravel(), labels.ravel()))
    else:
        order = input.ravel().argsort()
    input = input.ravel()[order]
    labels = labels.ravel()[order]
    if find_positions:
        positions = positions.ravel()[order]

    result = []
    # Scatter trick: assigning the value-sorted data per label means the
    # LAST write to each slot wins.  Writing in reverse order leaves the
    # smallest value per label; writing in forward order leaves the largest.
    if find_min:
        mins = numpy.zeros(labels.max() + 2, input.dtype)
        mins[labels[::-1]] = input[::-1]
        result += [mins[idxs]]
    if find_min_positions:
        minpos = numpy.zeros(labels.max() + 2, int)
        minpos[labels[::-1]] = positions[::-1]
        result += [minpos[idxs]]
    if find_max:
        maxs = numpy.zeros(labels.max() + 2, input.dtype)
        maxs[labels] = input
        result += [maxs[idxs]]
    if find_max_positions:
        maxpos = numpy.zeros(labels.max() + 2, int)
        maxpos[labels] = positions
        result += [maxpos[idxs]]
    if find_median:
        locs = numpy.arange(len(labels))
        # Same scatter trick to find, per label, the first (lo) and last
        # (hi) position of the label's run in the sorted arrays.
        lo = numpy.zeros(labels.max() + 2, numpy.int_)
        lo[labels[::-1]] = locs[::-1]
        hi = numpy.zeros(labels.max() + 2, numpy.int_)
        hi[labels] = locs
        lo = lo[idxs]
        hi = hi[idxs]
        # lo is an index to the lowest value in input for each label,
        # hi is an index to the largest value.
        # move them to be either the same ((hi - lo) % 2 == 0) or next
        # to each other ((hi - lo) % 2 == 1), then average.
        step = (hi - lo) // 2
        lo += step
        hi -= step
        # NOTE(review): the lines below use the ``np`` alias while the rest
        # of this function uses ``numpy`` — both must be bound at module
        # level; confirm the file's imports.
        if (np.issubdtype(input.dtype, np.integer)
                or np.issubdtype(input.dtype, np.bool_)):
            # avoid integer overflow or boolean addition (gh-12836)
            result += [(input[lo].astype('d') + input[hi].astype('d')) / 2.0]
        else:
            result += [(input[lo] + input[hi]) / 2.0]
    return result
def minimum(input, labels=None, index=None):
    """
    Calculate the minimum of the values of an array over labeled regions.

    Parameters
    ----------
    input : array_like
        Array_like of values. For each region specified by `labels`, the
        minimal values of `input` over the region is computed.
    labels : array_like, optional
        An array_like of integers marking different regions over which the
        minimum value of `input` is to be computed. `labels` must have the
        same shape as `input`. If `labels` is not specified, the minimum
        over the whole array is returned.
    index : array_like, optional
        A list of region labels that are taken into account for computing the
        minima. If index is None, the minimum over all elements where `labels`
        is non-zero is returned.

    Returns
    -------
    minimum : float or list of floats
        List of minima of `input` over the regions determined by `labels` and
        whose index is in `index`. If `index` or `labels` are not specified, a
        float is returned: the minimal value of `input` if `labels` is None,
        and the minimal value of elements where `labels` is greater than zero
        if `index` is None.

    See also
    --------
    label, maximum, median, minimum_position, extrema, sum, mean, variance,
    standard_deviation

    Notes
    -----
    The function returns a Python list and not a NumPy array, use
    `np.array` to convert the list to an array.

    Examples
    --------
    >>> from scipy import ndimage
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> labels, labels_nb = ndimage.label(a)
    >>> labels
    array([[1, 1, 0, 0],
           [1, 1, 0, 2],
           [0, 0, 0, 2],
           [3, 3, 0, 0]])
    >>> ndimage.minimum(a, labels=labels, index=np.arange(1, labels_nb + 1))
    [1.0, 4.0, 3.0]
    >>> ndimage.minimum(a)
    0.0
    >>> ndimage.minimum(a, labels=labels)
    1.0

    """
    # _select returns one entry per requested statistic; only the minimum
    # was requested, so it is the single element of the result list.
    selection = _select(input, labels, index, find_min=True)
    return selection[0]
def maximum(input, labels=None, index=None):
    """
    Calculate the maximum of the values of an array over labeled regions.

    Parameters
    ----------
    input : array_like
        Array_like of values. For each region specified by `labels`, the
        maximal values of `input` over the region is computed.
    labels : array_like, optional
        An array of integers marking different regions over which the
        maximum value of `input` is to be computed. `labels` must have the
        same shape as `input`. If `labels` is not specified, the maximum
        over the whole array is returned.
    index : array_like, optional
        A list of region labels that are taken into account for computing the
        maxima. If index is None, the maximum over all elements where `labels`
        is non-zero is returned.

    Returns
    -------
    output : float or list of floats
        List of maxima of `input` over the regions determined by `labels` and
        whose index is in `index`. If `index` or `labels` are not specified, a
        float is returned: the maximal value of `input` if `labels` is None,
        and the maximal value of elements where `labels` is greater than zero
        if `index` is None.

    See also
    --------
    label, minimum, median, maximum_position, extrema, sum, mean, variance,
    standard_deviation

    Notes
    -----
    The function returns a Python list and not a NumPy array, use
    `np.array` to convert the list to an array.

    Examples
    --------
    >>> a = np.arange(16).reshape((4,4))
    >>> a
    array([[ 0,  1,  2,  3],
           [ 4,  5,  6,  7],
           [ 8,  9, 10, 11],
           [12, 13, 14, 15]])
    >>> labels = np.zeros_like(a)
    >>> labels[:2,:2] = 1
    >>> labels[2:, 1:3] = 2
    >>> labels
    array([[1, 1, 0, 0],
           [1, 1, 0, 0],
           [0, 2, 2, 0],
           [0, 2, 2, 0]])
    >>> from scipy import ndimage
    >>> ndimage.maximum(a)
    15.0
    >>> ndimage.maximum(a, labels=labels, index=[1,2])
    [5.0, 14.0]
    >>> ndimage.maximum(a, labels=labels)
    14.0

    >>> b = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> labels, labels_nb = ndimage.label(b)
    >>> labels
    array([[1, 1, 0, 0],
           [1, 1, 0, 2],
           [0, 0, 0, 2],
           [3, 3, 0, 0]])
    >>> ndimage.maximum(b, labels=labels, index=np.arange(1, labels_nb + 1))
    [5.0, 7.0, 9.0]

    """
    # Only the maximum was requested from _select, so the result list has
    # exactly one entry.
    selection = _select(input, labels, index, find_max=True)
    return selection[0]
def median(input, labels=None, index=None):
    """
    Calculate the median of the values of an array over labeled regions.

    Parameters
    ----------
    input : array_like
        Array_like of values. For each region specified by `labels`, the
        median value of `input` over the region is computed.
    labels : array_like, optional
        An array_like of integers marking different regions over which the
        median value of `input` is to be computed. `labels` must have the
        same shape as `input`. If `labels` is not specified, the median
        over the whole array is returned.
    index : array_like, optional
        A list of region labels that are taken into account for computing the
        medians. If index is None, the median over all elements where `labels`
        is non-zero is returned.

    Returns
    -------
    median : float or list of floats
        List of medians of `input` over the regions determined by `labels` and
        whose index is in `index`. If `index` or `labels` are not specified, a
        float is returned: the median value of `input` if `labels` is None,
        and the median value of elements where `labels` is greater than zero
        if `index` is None.

    See also
    --------
    label, minimum, maximum, extrema, sum, mean, variance, standard_deviation

    Notes
    -----
    The function returns a Python list and not a NumPy array, use
    `np.array` to convert the list to an array.

    Examples
    --------
    >>> from scipy import ndimage
    >>> a = np.array([[1, 2, 0, 1],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> labels, labels_nb = ndimage.label(a)
    >>> labels
    array([[1, 1, 0, 2],
           [1, 1, 0, 2],
           [0, 0, 0, 2],
           [3, 3, 0, 0]])
    >>> ndimage.median(a, labels=labels, index=np.arange(1, labels_nb + 1))
    [2.5, 4.0, 6.0]
    >>> ndimage.median(a)
    1.0
    >>> ndimage.median(a, labels=labels)
    3.0

    """
    # Only the median was requested from _select, so the result list has
    # exactly one entry.
    selection = _select(input, labels, index, find_median=True)
    return selection[0]
def minimum_position(input, labels=None, index=None):
    """
    Find the positions of the minimums of the values of an array at labels.

    Parameters
    ----------
    input : array_like
        Array_like of values.
    labels : array_like, optional
        An array of integers marking different regions over which the
        position of the minimum value of `input` is to be computed.
        `labels` must have the same shape as `input`. If `labels` is not
        specified, the location of the first minimum over the whole
        array is returned.

        The `labels` argument only works when `index` is specified.
    index : array_like, optional
        A list of region labels that are taken into account for finding the
        location of the minima. If `index` is None, the ``first`` minimum
        over all elements where `labels` is non-zero is returned.

        The `index` argument only works when `labels` is specified.

    Returns
    -------
    output : list of tuples of ints
        Tuple of ints or list of tuples of ints that specify the location
        of minima of `input` over the regions determined by `labels` and
        whose index is in `index`.

        If `index` or `labels` are not specified, a tuple of ints is
        returned specifying the location of the first minimal value of `input`.

    See also
    --------
    label, minimum, median, maximum_position, extrema, sum, mean, variance,
    standard_deviation

    Examples
    --------
    >>> a = np.array([[10, 20, 30],
    ...               [40, 80, 100],
    ...               [1, 100, 200]])
    >>> b = np.array([[1, 2, 0, 1],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> from scipy import ndimage
    >>> ndimage.minimum_position(a)
    (2, 0)
    >>> ndimage.minimum_position(b)
    (0, 2)

    Features to process can be specified using `labels` and `index`:

    >>> label, pos = ndimage.label(a)
    >>> ndimage.minimum_position(a, label, index=np.arange(1, pos+1))
    [(2, 0)]

    >>> label, pos = ndimage.label(b)
    >>> ndimage.minimum_position(b, label, index=np.arange(1, pos+1))
    [(0, 0), (0, 3), (3, 1)]

    """
    shape = numpy.array(numpy.asarray(input).shape)
    # Row-major strides in units of elements; see numpy.unravel_index.
    dim_prod = numpy.cumprod([1] + list(shape[:0:-1]))[::-1]
    # _select reports positions as flat (raveled) indices.
    flat_index = _select(input, labels, index, find_min_positions=True)[0]
    if numpy.isscalar(flat_index):
        return tuple((flat_index // dim_prod) % shape)
    coords = (flat_index.reshape(-1, 1) // dim_prod) % shape
    return [tuple(v) for v in coords]
def maximum_position(input, labels=None, index=None):
    """
    Find the positions of the maximums of the values of an array at labels.

    For each region specified by `labels`, the position of the maximum
    value of `input` within the region is returned.

    Parameters
    ----------
    input : array_like
        Array_like of values.
    labels : array_like, optional
        An array of integers marking different regions over which the
        position of the maximum value of `input` is to be computed.
        `labels` must have the same shape as `input`. If `labels` is not
        specified, the location of the first maximum over the whole
        array is returned.

        The `labels` argument only works when `index` is specified.
    index : array_like, optional
        A list of region labels that are taken into account for finding the
        location of the maxima. If `index` is None, the first maximum
        over all elements where `labels` is non-zero is returned.

        The `index` argument only works when `labels` is specified.

    Returns
    -------
    output : list of tuples of ints
        List of tuples of ints that specify the location of maxima of
        `input` over the regions determined by `labels` and whose index
        is in `index`.

        If `index` or `labels` are not specified, a tuple of ints is
        returned specifying the location of the ``first`` maximal value
        of `input`.

    See also
    --------
    label, minimum, median, maximum_position, extrema, sum, mean, variance,
    standard_deviation

    Examples
    --------
    >>> from scipy import ndimage
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> ndimage.maximum_position(a)
    (3, 0)

    Features to process can be specified using `labels` and `index`:

    >>> lbl = np.array([[0, 1, 2, 3],
    ...                 [0, 1, 2, 3],
    ...                 [0, 1, 2, 3],
    ...                 [0, 1, 2, 3]])
    >>> ndimage.maximum_position(a, lbl, 1)
    (1, 1)

    If no index is given, non-zero `labels` are processed:

    >>> ndimage.maximum_position(a, lbl)
    (2, 3)

    If there are no maxima, the position of the first element is returned:

    >>> ndimage.maximum_position(a, lbl, 2)
    (0, 2)

    """
    shape = numpy.array(numpy.asarray(input).shape)
    # Row-major strides in units of elements; see numpy.unravel_index.
    dim_prod = numpy.cumprod([1] + list(shape[:0:-1]))[::-1]
    # _select reports positions as flat (raveled) indices.
    flat_index = _select(input, labels, index, find_max_positions=True)[0]
    if numpy.isscalar(flat_index):
        return tuple((flat_index // dim_prod) % shape)
    coords = (flat_index.reshape(-1, 1) // dim_prod) % shape
    return [tuple(v) for v in coords]
def extrema(input, labels=None, index=None):
    """
    Calculate the minimums and maximums of the values of an array
    at labels, along with their positions.

    Parameters
    ----------
    input : ndarray
        N-D image data to process.
    labels : ndarray, optional
        Labels of features in input.
        If not None, must be same shape as `input`.
    index : int or sequence of ints, optional
        Labels to include in output. If None (default), all values where
        non-zero `labels` are used.

    Returns
    -------
    minimums, maximums : int or ndarray
        Values of minimums and maximums in each feature.
    min_positions, max_positions : tuple or list of tuples
        Each tuple gives the N-D coordinates of the corresponding minimum
        or maximum.

    See Also
    --------
    maximum, minimum, maximum_position, minimum_position, center_of_mass

    Examples
    --------
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> from scipy import ndimage
    >>> ndimage.extrema(a)
    (0, 9, (0, 2), (3, 0))

    Features to process can be specified using `labels` and `index`:

    >>> lbl, nlbl = ndimage.label(a)
    >>> ndimage.extrema(a, lbl, index=np.arange(1, nlbl+1))
    (array([1, 4, 3]),
     array([5, 7, 9]),
     [(0, 0), (1, 3), (3, 1)],
     [(1, 0), (2, 3), (3, 0)])

    If no index is given, non-zero `labels` are processed:

    >>> ndimage.extrema(a, lbl)
    (1, 9, (0, 0), (3, 0))

    """
    shape = numpy.array(numpy.asarray(input).shape)
    # Row-major strides in units of elements; see numpy.unravel_index.
    dim_prod = numpy.cumprod([1] + list(shape[:0:-1]))[::-1]
    # Request everything from _select in one pass; results come back in
    # flag order: min, min position, max, max position (positions are flat).
    selection = _select(input, labels, index,
                        find_min=True, find_max=True,
                        find_min_positions=True, find_max_positions=True)
    minimums, min_positions, maximums, max_positions = selection
    if numpy.isscalar(minimums):
        min_pos = tuple((min_positions // dim_prod) % shape)
        max_pos = tuple((max_positions // dim_prod) % shape)
        return minimums, maximums, min_pos, max_pos
    min_positions = [
        tuple(v) for v in (min_positions.reshape(-1, 1) // dim_prod) % shape
    ]
    max_positions = [
        tuple(v) for v in (max_positions.reshape(-1, 1) // dim_prod) % shape
    ]
    return minimums, maximums, min_positions, max_positions
def center_of_mass(input, labels=None, index=None):
    """
    Calculate the center of mass of the values of an array at labels.

    Parameters
    ----------
    input : ndarray
        Data from which to calculate center-of-mass. The masses can either
        be positive or negative.
    labels : ndarray, optional
        Labels for objects in `input`, as generated by `ndimage.label`.
        Only used with `index`. Dimensions must be the same as `input`.
    index : int or sequence of ints, optional
        Labels for which to calculate centers-of-mass. If not specified,
        the combined center of mass of all labels greater than zero
        will be calculated. Only used with `labels`.

    Returns
    -------
    center_of_mass : tuple, or list of tuples
        Coordinates of centers-of-mass.

    Examples
    --------
    >>> a = np.array(([0,0,0,0],
    ...               [0,1,1,0],
    ...               [0,1,1,0],
    ...               [0,1,1,0]))
    >>> from scipy import ndimage
    >>> ndimage.center_of_mass(a)
    (2.0, 1.5)

    Calculation of multiple objects in an image

    >>> b = np.array(([0,1,1,0],
    ...               [0,1,0,0],
    ...               [0,0,0,0],
    ...               [0,0,1,1],
    ...               [0,0,1,1]))
    >>> lbl = ndimage.label(b)[0]
    >>> ndimage.center_of_mass(b, lbl, [1,2])
    [(0.33333333333333331, 1.3333333333333333), (3.5, 2.5)]

    Negative masses are also accepted, which can occur for example when
    bias is removed from measured data due to random noise.

    >>> c = np.array(([-1,0,0,0],
    ...               [0,-1,-1,0],
    ...               [0,1,-1,0],
    ...               [0,1,1,0]))
    >>> ndimage.center_of_mass(c)
    (-4.0, 1.0)

    If there are division by zero issues, the function does not raise an
    error but rather issues a RuntimeWarning before returning inf and/or NaN.

    >>> d = np.array([-1, 1])
    >>> ndimage.center_of_mass(d)
    (inf,)

    """
    # Total mass per region (``sum`` is this module's labeled sum).
    normalizer = sum(input, labels, index)
    # One open coordinate grid per axis, broadcastable against `input`.
    grids = numpy.ogrid[[slice(0, extent) for extent in input.shape]]
    results = []
    for axis in range(input.ndim):
        # First moment along this axis divided by the total mass.
        weighted = sum(input * grids[axis].astype(float), labels, index)
        results.append(weighted / normalizer)
    if numpy.isscalar(results[0]):
        return tuple(results)
    return [tuple(com) for com in numpy.array(results).T]
def histogram(input, min, max, bins, labels=None, index=None):
    """
    Calculate the histogram of the values of an array, optionally at labels.

    Histogram calculates the frequency of values in an array within bins
    determined by `min`, `max`, and `bins`. The `labels` and `index`
    keywords can limit the scope of the histogram to specified sub-regions
    within the array.

    Parameters
    ----------
    input : array_like
        Data for which to calculate histogram.
    min, max : int
        Minimum and maximum values of range of histogram bins.
    bins : int
        Number of bins.
    labels : array_like, optional
        Labels for objects in `input`.
        If not None, must be same shape as `input`.
    index : int or sequence of ints, optional
        Label or labels for which to calculate histogram. If None, all values
        where label is greater than zero are used

    Returns
    -------
    hist : ndarray
        Histogram counts.

    Examples
    --------
    >>> a = np.array([[ 0.    ,  0.2146,  0.5962,  0.    ],
    ...               [ 0.    ,  0.7778,  0.    ,  0.    ],
    ...               [ 0.    ,  0.    ,  0.    ,  0.    ],
    ...               [ 0.    ,  0.    ,  0.7181,  0.2787],
    ...               [ 0.    ,  0.    ,  0.6573,  0.3094]])
    >>> from scipy import ndimage
    >>> ndimage.histogram(a, 0, 1, 10)
    array([13,  0,  2,  1,  0,  1,  1,  2,  0,  0])

    With labels and no indices, non-zero elements are counted:

    >>> lbl, nlbl = ndimage.label(a)
    >>> ndimage.histogram(a, 0, 1, 10, lbl)
    array([0, 0, 2, 1, 0, 1, 1, 2, 0, 0])

    Indices can be used to count only certain objects:

    >>> ndimage.histogram(a, 0, 1, 10, lbl, 2)
    array([0, 0, 1, 1, 0, 0, 1, 1, 0, 0])

    """
    # bins + 1 edges delimit `bins` equal-width intervals over [min, max].
    edges = numpy.linspace(min, max, bins + 1)

    def _hist(vals):
        # Histogram the values of one labeled region against the shared edges.
        return numpy.histogram(vals, edges)[0]

    return labeled_comprehension(input, labels, index, _hist, object, None,
                                 pass_positions=False)
def watershed_ift(input, markers, structure=None, output=None):
    """
    Apply watershed from markers using image foresting transform algorithm.

    Parameters
    ----------
    input : array_like
        Input.
    markers : array_like
        Markers are points within each watershed that form the beginning
        of the process. Negative markers are considered background markers
        which are processed after the other markers.
    structure : structure element, optional
        A structuring element defining the connectivity of the object can be
        provided. If None, an element is generated with a squared
        connectivity equal to one.
    output : ndarray, optional
        An output array can optionally be provided. The same shape as input.

    Returns
    -------
    watershed_ift : ndarray
        Output.  Same shape as `input`.

    References
    ----------
    .. [1] A.X. Falcao, J. Stolfi and R. de Alencar Lotufo, "The image
           foresting transform: theory, algorithms, and applications",
           Pattern Analysis and Machine Intelligence, vol. 26, pp. 19-29, 2004.

    """
    input = numpy.asarray(input)
    # The C implementation only handles unsigned 8/16-bit images.
    if input.dtype.type not in [numpy.uint8, numpy.uint16]:
        raise TypeError('only 8 and 16 unsigned inputs are supported')
    if structure is None:
        structure = _morphology.generate_binary_structure(input.ndim, 1)
    structure = numpy.asarray(structure, dtype=bool)
    if structure.ndim != input.ndim:
        raise RuntimeError('structure and input must have equal rank')
    # The C code assumes a 3x...x3 structuring element.
    for ii in structure.shape:
        if ii != 3:
            raise RuntimeError('structure dimensions must be equal to 3')
    if not structure.flags.contiguous:
        structure = structure.copy()
    markers = numpy.asarray(markers)
    if input.shape != markers.shape:
        raise RuntimeError('input and markers must have equal shape')
    # Accepted integer marker/output dtypes.  ``numpy.int0`` (a deprecated
    # alias of ``numpy.intp``, removed in NumPy 2.0) was dropped from this
    # list; ``numpy.intp`` already covers it.
    integral_types = [numpy.int8,
                      numpy.int16,
                      numpy.int32,
                      numpy.int_,
                      numpy.int64,
                      numpy.intc,
                      numpy.intp]
    if markers.dtype.type not in integral_types:
        raise RuntimeError('marker should be of integer type')
    if isinstance(output, numpy.ndarray):
        if output.dtype.type not in integral_types:
            raise RuntimeError('output should be of integer type')
    else:
        # No array supplied: allocate an output with the markers' dtype.
        output = markers.dtype
    output = _ni_support._get_output(output, input)
    _nd_image.watershed_ift(input, markers, structure, output)
    return output
| {
"content_hash": "fe48a6b35404d85d2b5d1ede2bfe1d3a",
"timestamp": "",
"source": "github",
"line_count": 1517,
"max_line_length": 124,
"avg_line_length": 33.076466710613055,
"alnum_prop": 0.5772166530482093,
"repo_name": "matthew-brett/scipy",
"id": "091e446bd8f9257206f68b13fe871d76f7299564",
"size": "51619",
"binary": false,
"copies": "4",
"ref": "refs/heads/polished-meson-windows",
"path": "scipy/ndimage/_measurements.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4818671"
},
{
"name": "C++",
"bytes": "3181034"
},
{
"name": "CMake",
"bytes": "29273"
},
{
"name": "Cython",
"bytes": "1035101"
},
{
"name": "Dockerfile",
"bytes": "9777"
},
{
"name": "Fortran",
"bytes": "5298461"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Meson",
"bytes": "133294"
},
{
"name": "PowerShell",
"bytes": "1554"
},
{
"name": "Python",
"bytes": "14259543"
},
{
"name": "Shell",
"bytes": "4415"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
import requests
from . import TransportationImageProvider
from ...utils.shapely_utils import boundary_to_polygon, raw_boundary_to_polygon
from ...services.aus_map import AUS_NW, AUS_SE
from ...utils.header import render_header_to
# File name under which the downloaded ferry path data is cached.
PATH = 'ferry_paths.json'
# Base URL of the SilverRail journey planner REST service.
BASE = 'http://journeyplanner.silverrailtech.com/JourneyPlannerService/V2/REST'
# NOTE(review): hard-coded API key committed to source control — consider
# loading it from configuration or the environment instead.
API_KEY = 'eac7a147-0831-4fcf-8fa8-a5e8ffcfa039'
# Bounding polygon for Australia, built from the map's NW and SE corners.
AUSTRALIA_BOUNDARY = raw_boundary_to_polygon([AUS_NW, AUS_SE])
def query(endpoint, params):
    """Issue a GET request against the journey planner API.

    The API key is merged into ``params``; ``endpoint`` is appended to the
    service base URL.
    """
    merged_params = dict(params, ApiKey=API_KEY)
    return requests.get(BASE + endpoint, params=merged_params)
def get_datasets():
    """Return the list of datasets the journey planner service exposes."""
    response = query('/Datasets', {'format': 'json'})
    return response.json()["AvailableDataSets"]
def get_australian_dataset_names():
    """Yield the ids of datasets whose boundary intersects Australia."""
    for dataset in get_datasets():
        polygon = boundary_to_polygon(dataset["BoundaryPolyline"], True)
        if AUSTRALIA_BOUNDARY.intersects(polygon):
            yield dataset['Id']
def get_mapping_data(dataset, route):
    """Fetch the route map (including mapping data) for one route."""
    params = {
        'Route': route,
        'MappingDataRequired': True,
        'transactionId': 0,
        'format': 'json',
    }
    response = query("/DataSets/{}/RouteMap".format(dataset), params)
    return response.json()
def obtain_data():
    """Yield ferry route paths, each a list of (lat, lon) float pairs."""
    # Materialise the dataset ids up front, as the original did, so all
    # dataset queries happen before any stop queries.
    dataset_names = list(get_australian_dataset_names())
    for dataset in dataset_names:
        response = query(
            '/Datasets/{}/Stops'.format(dataset),
            {
                'transportModes': 'Ferry',
                'format': 'json',
                'searchTerm': 'Ferry'
            }
        )
        for stop in response.json()['TransitStops']:
            # Skip stops that are not actually served by ferries.
            if 'Ferry' not in stop['SupportedModes']:
                continue
            for route in stop['Routes'].split(';'):
                mapping = get_mapping_data(dataset, route)
                for segment in mapping['MapSegments']:
                    # Polylines are ';'-separated points of ','-separated
                    # coordinates.
                    yield [
                        tuple(float(coord) for coord in point.split(','))
                        for point in segment['Polyline'].split(';')
                    ]
class FerryImageProvider(TransportationImageProvider):
    """Image provider that renders Australian ferry routes."""

    path = PATH

    def obtain_data(self):
        """Download the ferry paths and persist them as JSON."""
        paths = list(obtain_data())
        return self.save_json(PATH, paths)

    def build_image(self):
        """Render the base transport map, then stamp the ferry-map header."""
        base_image = super().build_image()
        header_lines = [
            '<b>MAP OF</b>',
            '<b>FERRY ROUTES IN AUSTRALIA</b>',
            '<i>Compiled using data from SilverRails Tech</i>',
        ]
        return render_header_to(
            self.services.fonts.get_font(),
            base_image,
            19.25,
            header_lines,
        )
| {
"content_hash": "6201d383ae423d267bcff520e2acc398",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 79,
"avg_line_length": 27.125,
"alnum_prop": 0.5495391705069125,
"repo_name": "Mause/statistical_atlas_of_au",
"id": "b0f45da8d552812bcbec7864b695006c588f06af",
"size": "2604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saau/sections/transportation/ferrys.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106197"
}
],
"symlink_target": ""
} |
from boto.swf.exceptions import SWFResponseError, SWFTypeAlreadyExistsError
from swf import exceptions
from swf.constants import REGISTERED
from swf.exceptions import AlreadyExistsError, DoesNotExistError, ResponseError, raises
from swf.models import BaseModel
from swf.models.base import ModelDiff
from swf.utils import immutable
class ActivityTypeDoesNotExist(Exception):
    """Raised when the requested ActivityType cannot be found on the SWF side."""
    pass
@immutable
class ActivityType(BaseModel):
"""ActivityType wrapper
:param domain: Domain the workflow type should be registered in
:type domain: swf.models.Domain
:param name: name of the ActivityType
:type name: str
:param version: version of the ActivityType
:type version: str
:param status: ActivityType status
:type status: swf.constants.{REGISTERED, DEPRECATED}
:param description: ActivityType description
:type description: str | None
:param creation_date: creation date of the current ActivityType
:type creation_date: float (timestamp)
:param deprecation_date: deprecation date of ActivityType
:type deprecation_date: float (timestamp)
:param task_list: specifies the default task list to use for scheduling
tasks of this activity type.
:type task_list: str
:param task_heartbeat_timeout: default maximum time before which a worker
processing a task of this type must report
progress by calling RecordActivityTaskHeartbeat.
:type task_heartbeat_timeout: int
:param task_schedule_to_close_timeout: default maximum duration for a task
of this activity type.
:type task_schedule_to_close_timeout: int
:param task_schedule_to_start_timeout: default maximum duration that a
task of this activity type can wait
before being assigned to a worker.
:type task_schedule_to_start_timeout: int
:param task_start_to_close_timeout: default maximum duration that a
worker can take to process tasks of
this activity type.
:type task_start_to_close_timeout: int
"""
kind = "type"
__slots__ = [
"domain",
"name",
"version",
"status",
"description",
"creation_date",
"deprecation_date",
"task_list",
"task_heartbeat_timeout",
"task_schedule_to_close_timeout",
"task_schedule_to_start_timeout",
"task_start_to_close_timeout",
]
def __init__(
self,
domain,
name,
version,
status=REGISTERED,
description=None,
creation_date=0.0,
deprecation_date=0.0,
task_list=None,
task_heartbeat_timeout=0,
task_schedule_to_close_timeout=0,
task_schedule_to_start_timeout=0,
task_start_to_close_timeout=0,
*args,
**kwargs
):
self.domain = domain
self.name = name
self.version = version
self.status = status
self.description = description
self.creation_date = creation_date
self.deprecation_date = deprecation_date
self.task_list = task_list
self.task_heartbeat_timeout = task_heartbeat_timeout
self.task_schedule_to_close_timeout = task_schedule_to_close_timeout
self.task_schedule_to_start_timeout = task_schedule_to_start_timeout
self.task_start_to_close_timeout = task_start_to_close_timeout
# immutable decorator rebinds class name,
# so have to use generic self.__class__
super(self.__class__, self).__init__(*args, **kwargs)
def _diff(self):
"""Checks for differences between ActivityType instance
and upstream version
:returns: A list (swf.models.base.ModelDiff) namedtuple describing
differences
:rtype: ModelDiff
"""
try:
description = self.connection.describe_activity_type(
self.domain.name, self.name, self.version
)
except SWFResponseError as err:
if err.error_code == "UnknownResourceFault":
raise DoesNotExistError("Remote ActivityType does not exist")
raise ResponseError(err.body["message"])
info = description["typeInfo"]
config = description["configuration"]
return ModelDiff(
("name", self.name, info["activityType"]["name"]),
("version", self.version, info["activityType"]["version"]),
("status", self.status, info["status"]),
("description", self.description, info["description"]),
("creation_date", self.creation_date, info["creationDate"]),
("deprecation_date", self.deprecation_date, info["deprecationDate"]),
("task_list", self.task_list, config["defaultTaskList"]["name"]),
(
"task_heartbeat_timeout",
self.task_heartbeat_timeout,
config["defaultTaskHeartbeatTimeout"],
),
(
"task_schedule_to_close_timeout",
self.task_schedule_to_close_timeout,
config["defaultTaskScheduleToCloseTimeout"],
),
(
"task_schedule_to_start_timeout",
self.task_schedule_to_start_timeout,
config["defaultTaskScheduleToStartTimeout"],
),
(
"task_start_to_close_timeout",
self.task_start_to_close_timeout,
config["defaultTaskStartToCloseTimeout"],
),
)
@property
@exceptions.is_not(ActivityTypeDoesNotExist)
@exceptions.catch(
SWFResponseError,
raises(
ActivityTypeDoesNotExist,
when=exceptions.is_unknown("ActivityType"),
extract=exceptions.extract_resource,
),
)
def exists(self):
"""Checks if the ActivityType exists amazon-side
:rtype: bool
"""
self.connection.describe_activity_type(
self.domain.name, self.name, self.version
)
return True
def save(self):
"""Creates the activity type amazon side"""
try:
self.connection.register_activity_type(
self.domain.name,
self.name,
self.version,
task_list=str(self.task_list),
default_task_heartbeat_timeout=str(self.task_heartbeat_timeout),
default_task_schedule_to_close_timeout=str(
self.task_schedule_to_close_timeout
),
default_task_schedule_to_start_timeout=str(
self.task_schedule_to_start_timeout
),
default_task_start_to_close_timeout=str(
self.task_start_to_close_timeout
),
description=self.description,
)
except SWFTypeAlreadyExistsError as err:
raise AlreadyExistsError("{} already exists".format(self))
except SWFResponseError as err:
if err.error_code in ["UnknownResourceFault", "TypeDeprecatedFault"]:
raise DoesNotExistError(err.body["message"])
raise
    # Map an "unknown ActivityType" SWF response error onto the
    # dedicated ActivityTypeDoesNotExist exception.
    @exceptions.catch(
        SWFResponseError,
        raises(
            ActivityTypeDoesNotExist,
            when=exceptions.is_unknown("ActivityType"),
            extract=exceptions.extract_resource,
        ),
    )
    def delete(self):
        """Deprecates the activity type amazon side.

        :raises ActivityTypeDoesNotExist: if the type is unknown upstream
        """
        self.connection.deprecate_activity_type(
            self.domain.name, self.name, self.version
        )
def upstream(self):
from swf.querysets.activity import ActivityTypeQuerySet
qs = ActivityTypeQuerySet(self.domain)
return qs.get(self.name, self.version)
def __repr__(self):
return "<{} domain={} name={} version={} status={}>".format(
self.__class__.__name__,
self.domain.name,
self.name,
self.version,
self.status,
)
@immutable
class ActivityTask(BaseModel):
__slots__ = [
"domain",
"task_list",
"task_token",
"activity_type",
"workflow_execution",
"input",
"activity_id",
"started_event_id",
]
def __init__(
self,
domain,
task_list,
task_token=None,
activity_type=None,
workflow_execution=None,
input=None,
activity_id=None,
started_event_id=None,
context=None,
):
self.domain = domain
self.task_list = task_list
self.task_token = task_token
self.activity_type = activity_type
self.workflow_execution = workflow_execution
self.input = input
self.activity_id = activity_id
self.started_event_id = started_event_id
self.context = context
@classmethod
def from_poll(cls, domain, task_list, data):
from .workflow import WorkflowExecution
activity_type = ActivityType(
domain, data["activityType"]["name"], data["activityType"]["version"]
)
workflow_execution = WorkflowExecution(
domain,
data["workflowExecution"]["workflowId"],
data["workflowExecution"]["runId"],
)
return cls(
domain,
task_list,
task_token=data["taskToken"],
activity_type=activity_type,
workflow_execution=workflow_execution,
input=data.get("input"),
activity_id=data["activityId"],
started_event_id=data["startedEventId"],
context=data,
)
| {
"content_hash": "cddae068758cf8b96a17748d06bd8401",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 87,
"avg_line_length": 32.495114006514655,
"alnum_prop": 0.5743785084202085,
"repo_name": "botify-labs/simpleflow",
"id": "3998a6f3e76a2611df7e706db35d82abc980ddae",
"size": "10121",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "swf/models/activity.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "298"
},
{
"name": "Python",
"bytes": "801698"
},
{
"name": "Shell",
"bytes": "4481"
}
],
"symlink_target": ""
} |
from scrapy.item import Item, Field
class Ingredient(Item):
    """A single scraped recipe ingredient (name plus quantity string)."""
    name = Field()
    quantity = Field()
class Recipe(Item):
    """Base scraped recipe schema shared by the site-specific items below."""
    id = Field()
    name = Field()
    author = Field()
    description = Field()
    ingredients = Field()
    instructions = Field()
    published_date = Field()
    updated_date = Field()
class CookpadRecipe(Recipe):
    """Recipe fields specific to cookpad.com pages."""
    category = Field()  # Stores only the main category
    categories = Field()  # Stores all of the relevant categories, including parents
    report_count = Field()
    comment_count = Field()
    advice = Field()
    history = Field()
    image_main = Field()
    images_instruction = Field()
    related_keywords = Field()
class AllrecipesRecipe(Recipe):
    """Recipe fields specific to allrecipes.com pages."""
    category = Field()
    prep_time = Field()
    cook_time = Field()
    rating = Field()
    nutrients = Field()
| {
"content_hash": "64117327d48353c60940b49d76b2dbf9",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 83,
"avg_line_length": 22.88888888888889,
"alnum_prop": 0.6347087378640777,
"repo_name": "mrorii/cookbot",
"id": "70be1d7ad266e958fc90534ebdd0de46ca353145",
"size": "941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cookbot/items.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21032"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_horizon.dashboards.admin.volumes.volume_types.qos_specs \
import views
# Routes for managing key/value pairs of a volume-type QoS spec.
# NOTE: pattern order matters for URL resolution; do not reorder.
urlpatterns = patterns(
    '',
    # Create a new key/value pair on the given QoS spec.
    url(r'^(?P<qos_spec_id>[^/]+)/create/$',
        views.CreateKeyValuePairView.as_view(), name='create'),
    # List the key/value pairs of the given QoS spec.
    url(r'^(?P<qos_spec_id>[^/]+)/$', views.IndexView.as_view(), name='index'),
    # Edit an existing key/value pair, identified by its key.
    url(r'^(?P<qos_spec_id>[^/]+)/key/(?P<key>[^/]+)/edit/$',
        views.EditKeyValuePairView.as_view(), name='edit')
)
| {
"content_hash": "ee6518283a8954c9352b15dd0445eb81",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 79,
"avg_line_length": 36.214285714285715,
"alnum_prop": 0.631163708086785,
"repo_name": "mrunge/openstack_horizon",
"id": "15a35e3e928f819d784d79a209639de956ba62c8",
"size": "1053",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_horizon/dashboards/admin/volumes/volume_types/qos_specs/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "63809"
},
{
"name": "JavaScript",
"bytes": "40"
},
{
"name": "Python",
"bytes": "3460539"
},
{
"name": "Shell",
"bytes": "16000"
}
],
"symlink_target": ""
} |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
# NOTE: protoc-generated descriptor code (see the @@protoc_insertion_point
# markers below) — regenerate from google/api/source_info.proto rather than
# editing by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='google/api/source_info.proto',
  package='google.api',
  syntax='proto3',
  serialized_options=_b('\n\016com.google.apiB\017SourceInfoProtoP\001ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\242\002\004GAPI'),
  serialized_pb=_b('\n\x1cgoogle/api/source_info.proto\x12\ngoogle.api\x1a\x19google/protobuf/any.proto\"8\n\nSourceInfo\x12*\n\x0csource_files\x18\x01 \x03(\x0b\x32\x14.google.protobuf.AnyBq\n\x0e\x63om.google.apiB\x0fSourceInfoProtoP\x01ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\xa2\x02\x04GAPIb\x06proto3')
  ,
  dependencies=[google_dot_protobuf_dot_any__pb2.DESCRIPTOR,])
# Descriptor for the single message, google.api.SourceInfo, with its one
# repeated google.protobuf.Any field `source_files`.
_SOURCEINFO = _descriptor.Descriptor(
  name='SourceInfo',
  full_name='google.api.SourceInfo',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='source_files', full_name='google.api.SourceInfo.source_files', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=71,
  serialized_end=127,
)
# Wire up cross-file message references and register everything with the
# default symbol database.
_SOURCEINFO.fields_by_name['source_files'].message_type = google_dot_protobuf_dot_any__pb2._ANY
DESCRIPTOR.message_types_by_name['SourceInfo'] = _SOURCEINFO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SourceInfo = _reflection.GeneratedProtocolMessageType('SourceInfo', (_message.Message,), {
  'DESCRIPTOR' : _SOURCEINFO,
  '__module__' : 'google.api.source_info_pb2'
  # @@protoc_insertion_point(class_scope:google.api.SourceInfo)
  })
_sym_db.RegisterMessage(SourceInfo)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| {
"content_hash": "f740ca4fe0649c553d770e75c7d327d2",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 339,
"avg_line_length": 35.05714285714286,
"alnum_prop": 0.7449062754686226,
"repo_name": "googleapis/api-client-staging",
"id": "6d662feecdca9c7b64bb4dc07e307c1ae510ff9b",
"size": "2577",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "generated/python/googleapis-common-protos/google/api/source_info_pb2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "246351"
},
{
"name": "JavaScript",
"bytes": "890945"
},
{
"name": "PHP",
"bytes": "3462920"
},
{
"name": "Python",
"bytes": "1400280"
},
{
"name": "Shell",
"bytes": "592"
}
],
"symlink_target": ""
} |
__author__ = 'had'
# The MIT License (MIT)
# Copyright (c) [2015] [Houtmann Hadrien]
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the Aidez-moi), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from django import forms
from ticket.models import User, Tickets, UserProfile, Follow, Entity
from django.utils.translation import ugettext as _
from djangoticket.settings import USE_MAIL
from ticket.tasks import follow_on_ticket
import json
class ConnexionForm(forms.Form):
    """
    Login page form (username + password).
    """
    username = forms.CharField(label=_("Nom d'utilisateur"), max_length=30,
                               widget=forms.TextInput(attrs={
                                   'type': "text",
                                   'placeholder': "Username"}))
    password = forms.CharField(label=_("Mot de passe"),
                               widget=forms.PasswordInput(attrs={
                                   'type': "password",
                                   'placeholder': "Password"}))
class TicketForm(forms.ModelForm):
    """
    Form to create or edit a ticket.
    """
    title = forms.CharField(label=_('Titre'),
                            widget=forms.TextInput
                            (attrs={'placeholder': _('Titre'),
                                    'size': '110',
                                    }))
    content = forms.CharField(label=_('Ticket'),
                              widget=forms.Textarea
                              (attrs={'placeholder': _('Contenu du ticket'),
                                      'rows': '5',
                                      'class': 'uk-width-1-1'}))
    priority = forms.ChoiceField(
        choices=Tickets.PRIORITY_CHOICES,
        required=True,
        initial='3',
        label=(_('Urgence')),
        help_text=(_('Veuillez selectionner une priorité.')),
    )
    # Only staff members can be assignees.
    assign_to = forms.ModelChoiceField(
        queryset=User.objects.all().filter(is_staff=1),
        label=_('Assigné à'),
        required=False)
    file = forms.FileField(required=False)
    entity = forms.ModelChoiceField(queryset=Entity.objects.all(),
                                    label=_("Entité"),
                                    required=False)
    class Meta:
        model = Tickets
        exclude = ('created', 'create_by', 'complete')
    def __init__(self, *args, **kwargs):
        """
        Hide staff-only fields (assign_to) from non-staff users.

        Expects a `user` keyword argument carrying the request's user.
        NOTE(review): if no `user` kwarg is passed, `user` is None and
        `user.is_staff` raises AttributeError — confirm all callers
        supply it.
        """
        user = kwargs.pop('user', None)
        super(TicketForm, self).__init__(*args, **kwargs)
        if user.is_staff is False:
            del self.fields['assign_to']
            #del self.fields['status']
    def close(self, ticket_id, user):
        """Close a ticket; refuse if the ticket it depends on is not closed."""
        ticket = Tickets.objects.get(pk=ticket_id)
        if ticket.depends_on == '':
            self.save_one(ticket_id, user)
        else:
            ticket_t = Tickets.objects.get(pk=ticket.depends_on)
            if ticket_t.status == 'CLOSED':
                self.save_one(ticket_id,user)
            else:
                raise Exception(_('vous devez clore le ticket %s') % ticket.depends_on)
    def save_one(self, ticket_id, user, *args, **kwargs):
        """
        Save ticket edits, recording changed fields in the Follow table.

        :param ticket_id: primary key of the ticket being edited
        :param user: user performing the edit (recorded as follower)

        For every changed field, the old and new values are stored as JSON
        in a Follow entry so the ticket's history can be tracked; a mail
        notification task is queued when USE_MAIL is enabled.
        """
        old_value = {} # Maps changed field -> previous value (also sent to the celery worker)
        new_value = {}
        if Tickets.objects.filter(id=ticket_id).exists():
            if self.has_changed():
                ticket = Tickets.objects.filter(pk=ticket_id)
                for field in self.changed_data:
                    new = self[field].value()
                    old_value[field] = ticket.values(field)[0].get(field)
                    new_value[field] = new
                Follow.objects.create(
                    ticket_id=ticket_id,
                    old_value=json.dumps(old_value),
                    new_value=json.dumps(new_value),
                    follow_by=user)
                if USE_MAIL:
                    follow_on_ticket.delay(ticket_id, old_value, new_value)
                else:
                    pass
        super(TicketForm, self).save(*args, **kwargs)
class StatusForm(TicketForm, forms.ModelForm):
    """
    Form for changing only the status of a ticket.
    """
    status = forms.CharField(required=False,
                             widget=forms.Select(
                                 choices=Tickets.STATUS_CHOICES))
    class Meta:
        model = Tickets
        fields = ['status']
        exclude = ('title',
                   'content',
                   'created',
                   'priority',
                   'last_edited',
                   'complete',
                   'depends_on',
                   'types',
                   'assign_to',
                   'category',
                   'create_by',
                   'ask_to_delete',)
    def __init__(self, *args, **kwargs):
        """
        Strip the TicketForm fields that are irrelevant when only the
        status is being changed.
        """
        super(StatusForm, self).__init__(*args, **kwargs)
        del self.fields['priority']
        del self.fields['title']
        del self.fields['content']
class ResponseForm(forms.ModelForm):
    """Form for posting a free-text reply (follow-up) on a ticket."""
    follow = forms.CharField(label='Ticket', required=False, widget=forms.Textarea(
        attrs={'placeholder': _('Réponse au ticket'),
               'rows': '4',
               'class': 'uk-width-1-1'}))
    class Meta:
        model = Follow
        fields = ['follow']
        exclude = (
            'date_follow',
            'ticket_id',
            'field',
            'new_value',
            'old_value',
            'follower')
class EntityForm(forms.ModelForm):
    """Form for picking an existing entity."""
    name = forms.ModelChoiceField(queryset=Entity.objects.all())
    class Meta:
        model = Entity
        fields = ['name']
| {
"content_hash": "ba7c04f82792809ad189415f95d84a13",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 93,
"avg_line_length": 33.88732394366197,
"alnum_prop": 0.5451648656137434,
"repo_name": "hadmagic/Aidez-moi",
"id": "e7f22904ed89b0b39a890cbf1ebab8ab8c21fe50",
"size": "7242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ticket/forms/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "29741"
},
{
"name": "HTML",
"bytes": "18933"
},
{
"name": "JavaScript",
"bytes": "59965"
},
{
"name": "Python",
"bytes": "85394"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from jinja2 import contextfilter
from pprint import pformat
from cgi import escape as html_escape
def html_format(variable):
    """Pretty-print *variable* and make the result renderable as HTML."""
    # Escape first, then swap whitespace for HTML equivalents so the
    # pformat layout survives rendering.
    escaped = html_escape(pformat(variable, indent=4))
    return escaped.replace(' ', '&nbsp;').replace('\n', '<br />')
def dump(variable):
    """Jinja filter: render any variable as an HTML pretty-print."""
    return html_format(variable)
@contextfilter
def dump_all(context, var):
    """Jinja filter: dump the whole template context as HTML.

    NOTE(review): the filtered value ``var`` is ignored; the entire
    context is rendered instead — confirm this is intentional.
    """
    return html_format(context.items())
files = {}  # process-wide cache: filename -> file contents


def dump_file(filename):
    """Return the contents of *filename*, caching reads per process.

    Bug fix: the previous truthiness check (``files.get(filename, False)``)
    treated a cached *empty* file as a cache miss and re-read it on every
    call; membership is now tested explicitly.
    """
    if filename in files:
        return files[filename]
    with open(filename, 'r') as fp:
        data = fp.read()
    files[filename] = data
    return data
urls = {}  # process-wide cache: url -> fetched response body


def dump_url(url):
    """Fetch *url* and return the response body, caching per process.

    Bug fixes: membership is tested explicitly (the old truthiness check
    re-fetched cached empty responses), and the HTTP response is now
    closed after reading.
    """
    if url in urls:
        return urls[url]
    import urllib2
    response = urllib2.urlopen(url)
    try:
        html = response.read()
    finally:
        response.close()
    urls[url] = html
    return html
| {
"content_hash": "f8c32e62e0e9a68df4c5a704534feb16",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 59,
"avg_line_length": 18.73469387755102,
"alnum_prop": 0.6339869281045751,
"repo_name": "Saevon/saevon.github.io",
"id": "4f5aece68e043336bab58c2bbbc28f5592ea87f0",
"size": "961",
"binary": false,
"copies": "1",
"ref": "refs/heads/source",
"path": "plugins/jinja2/debug_dump.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15321"
},
{
"name": "HTML",
"bytes": "19897"
},
{
"name": "JavaScript",
"bytes": "847"
},
{
"name": "Python",
"bytes": "16217"
},
{
"name": "Shell",
"bytes": "4670"
}
],
"symlink_target": ""
} |
import os
import os.path
import glob
# Collect every Python file in the current directory.
py_files = glob.glob('*.py')
# Print (name, size, mtime) triples, one per file.
size_and_date = [(fn, os.path.getsize(fn), os.path.getmtime(fn))
                 for fn in py_files]
for entry in size_and_date:
    print(entry)
# Print selected fields taken from the full stat() metadata.
stats = [(fn, os.stat(fn)) for fn in py_files]
for fn, st in stats:
    print(fn, st.st_size, st.st_mtime)
| {
"content_hash": "c18aea0d109afcd471ed678f1548397a",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 69,
"avg_line_length": 24.11764705882353,
"alnum_prop": 0.6780487804878049,
"repo_name": "huawei-cloud/compass-adapters",
"id": "6f25844a522eb6e489198daa9daeb18240de0c41",
"size": "452",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "chef/cookbooks/python/src/5/getting_a_directory_listing/example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "21125"
},
{
"name": "CSS",
"bytes": "111630"
},
{
"name": "Perl",
"bytes": "848"
},
{
"name": "Python",
"bytes": "208453"
},
{
"name": "Ruby",
"bytes": "1406351"
},
{
"name": "Shell",
"bytes": "5072"
}
],
"symlink_target": ""
} |
"""Axonius Integration for Cortex XSOAR - Unit Tests file."""
from TestData.Raw_data import USERS_SQS, DUMMY_TAGS, DUMMY_DEVICES_IDS, DUMMY_USER_IDS, DUMMY_DEVICES
from TestData.Expected_data import EXPECTED_USERS_SQS, EXPECTED_DEVICE_TAGS, EXPECTED_DEVICE
from Axonius import run_command
class DummyDevices:
    """Stub of the Axonius devices API surface used by the tests."""
    def __init__(self):
        self.saved_query = DummyDevicesSavedQueries()
        self.labels = DummyDevicesLabels()
        self.LAST_GET = {}
    @staticmethod
    def get_by_hostname(value: str, max_rows: int, fields: list):
        # Arguments are ignored; always returns the canned device list.
        return DUMMY_DEVICES
class DummyDevicesSavedQueries:
    """Stub returning canned saved queries for devices."""
    @staticmethod
    def get():
        return USERS_SQS
class DummyDevicesLabels:
    """Stub of the device labels (tags) API."""
    @staticmethod
    def get():
        return DUMMY_TAGS
    @staticmethod
    def add(rows: list, labels: list):
        # Mimics the API by returning the number of affected devices.
        return len(DUMMY_DEVICES_IDS)
class DummyUsers:
    """Stub of the Axonius users API surface used by the tests."""
    def __init__(self):
        self.saved_query = DummyUsersSavedQueries()
        self.labels = DummyUsersLabels()
        self.LAST_GET = {}
class DummyUsersSavedQueries:
    """Stub returning canned saved queries for users."""
    @staticmethod
    def get():
        return USERS_SQS
class DummyUsersLabels:
    """Stub of the user labels (tags) API."""
    @staticmethod
    def get():
        return DUMMY_TAGS
    @staticmethod
    def remove(rows: list, labels: list):
        # Mimics the API by returning the number of affected users.
        return len(DUMMY_USER_IDS)
class DummyConnect:
    """Stand-in for the Axonius API client passed to run_command."""
    def __init__(self):
        self.devices = DummyDevices()
        self.users = DummyUsers()
    @staticmethod
    def start():
        # The real client opens a connection here; the stub just succeeds.
        return True
def test_client():
    """The test-module command reports a healthy connection."""
    result = run_command(client=DummyConnect(), args={}, command="test-module")
    assert result == "ok"
def test_get_saved_queries():
    """Saved-query listing yields one output entry per expected query."""
    result = run_command(
        client=DummyConnect(),
        args={"type": "users"},
        command="axonius-get-saved-queries",
    )
    assert len(result.outputs) == len(EXPECTED_USERS_SQS)
def test_get_tags():
    """Tag listing for devices matches the expected canned tags."""
    result = run_command(
        client=DummyConnect(),
        args={"type": "devices"},
        command="axonius-get-tags",
    )
    assert result.outputs == EXPECTED_DEVICE_TAGS
def test_add_tags():
    """Adding a tag reports the number of devices updated."""
    request = {"type": "devices", "ids": DUMMY_DEVICES_IDS, "tag_name": "test"}
    result = run_command(client=DummyConnect(), args=request, command="axonius-add-tag")
    assert result.outputs == len(DUMMY_DEVICES_IDS)
def test_remove_tags():
    """Removing a tag reports the number of users updated."""
    request = {"type": "users", "ids": DUMMY_USER_IDS, "tag_name": "test"}
    result = run_command(client=DummyConnect(), args=request, command="axonius-remove-tag")
    assert result.outputs == len(DUMMY_USER_IDS)
def test_get_device():
    """Hostname lookup returns the expected device's axon id."""
    result = run_command(
        client=DummyConnect(),
        args={"value": "DESKTOP-Gary-Gaither"},
        command="axonius-get-devices-by-hostname",
    )
    assert result.outputs["internal_axon_id"] == EXPECTED_DEVICE["internal_axon_id"]
| {
"content_hash": "e9a296ab9f8481991b9d3489ee2d932a",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 101,
"avg_line_length": 26.354545454545455,
"alnum_prop": 0.6585029320455329,
"repo_name": "VirusTotal/content",
"id": "ddeab790716b74b594e5c489d09d522995847d7a",
"size": "2899",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/Axonius/Integrations/Axonius/Axonius_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47594464"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
} |
import re
import urllib2
import socket
import HTMLParser
import logging
from datetime import datetime
URL_REGEX = re.compile(r"(?:^|\s)((?:https?://)?(?:[a-z0-9.\-]+[.][a-z]{2,4}/?)(?:[^\s()<>]*|\((?:[^\s()<>]+|(?:\([^\s()<>]+\)))*\))+(?:\((?:[^\s()<>]+|(?:\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'\".,<>?]))", flags=re.IGNORECASE|re.DOTALL)
TITLE_REGEX = re.compile(r'<title(\s+.*?)?>(.*?)</title>', flags=re.IGNORECASE|re.DOTALL)
class URLsPlugin(object):
    # Cardinal IRC plugin: watches channel messages for URLs and announces
    # each page's <title>. (Python 2 code: urllib2, HTMLParser, old except
    # syntax.)
    logger = None
    """Logging object for URLsPlugin"""
    timeout = 10
    """Timeout in seconds before bailing on loading page"""
    read_bytes = 524288
    """Bytes to read before bailing on loading page (512KB)"""
    lookup_cooloff = 10
    """Timeout in seconds before looking up the same URL again"""
    last_url = None
    """Holds the last URL looked up, for cooloff"""
    last_url_at = None
    """Holds time last URL was looked up, for cooloff"""
    def __init__(self, cardinal, config):
        # Initialize logger
        self.logger = logging.getLogger(__name__)
        # Only check config if it exists
        # NOTE(review): when config is None we return before registering the
        # 'urls.detection' event, yet close() always removes it — confirm
        # this is safe when the plugin is loaded without config.
        if config is None:
            return
        if 'timeout' in config:
            self.timeout = config['timeout']
        if 'read_bytes' in config:
            self.read_bytes = config['read_bytes']
        if 'lookup_cooloff' in config:
            self.lookup_cooloff = config['lookup_cooloff']
        cardinal.event_manager.register('urls.detection', 2)
    def get_title(self, cardinal, user, channel, msg):
        # Find every URL within the message
        urls = re.findall(URL_REGEX, msg)
        # Loop through the URLs, and make them valid
        for url in urls:
            # Prepend a scheme when the message omitted it.
            if url[:7].lower() != "http://" and url[:8].lower() != "https://":
                url = "http://" + url
            # Cooloff: skip a URL we just looked up. NOTE(review): the early
            # `return` here (and below) also abandons any remaining URLs in
            # the same message; timedelta.seconds ignores full days.
            if (url == self.last_url and self.last_url_at and
                    (datetime.now() - self.last_url_at).seconds < self.lookup_cooloff):
                return
            self.last_url = url
            self.last_url_at = datetime.now()
            # Check if another plugin has hooked into this URL and wants to
            # provide information itself
            hooked = cardinal.event_manager.fire('urls.detection', channel, url)
            if hooked:
                return
            # Attempt to load the page, timing out after a default of ten seconds
            try:
                o = urllib2.build_opener()
                o.addheaders = [
                    ('User-agent', 'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36')
                ]
                f = o.open(url, timeout=self.timeout)
            except Exception, e:
                self.logger.exception("Unable to load URL: %s" % url)
                return
            # Attempt to find the title
            # Only parse (X)HTML responses; skip images, downloads, etc.
            content_type = f.info()['content-type']
            if not (('text/html' in content_type) or ('text/xhtml' in content_type)):
                return
            # NOTE(review): f.close() is not in a finally block, so the early
            # returns above leak the connection for non-HTML responses.
            content = f.read(self.read_bytes)
            f.close()
            title = re.search(TITLE_REGEX, content)
            if title:
                if len(title.group(2).strip()) > 0:
                    title = re.sub('\s+', ' ', title.group(2)).strip()
                    h = HTMLParser.HTMLParser()
                    title = str(h.unescape(title).encode('utf-8'))
                    # Truncate long titles to the first 200 characters.
                    title_to_send = title[:200] if len(title) >= 200 else title
                    cardinal.sendMsg(channel, "URL Found: %s" % title_to_send)
                continue
    # Cardinal uses this attribute to decide which messages trigger get_title.
    get_title.regex = URL_REGEX
    def close(self, cardinal):
        # Unregister the event added in __init__ when the plugin unloads.
        cardinal.event_manager.remove('urls.detection')
def setup(cardinal, config):
    """Plugin entry point: build the URLsPlugin instance for Cardinal."""
    return URLsPlugin(cardinal, config)
| {
"content_hash": "b2a24fd89e08c11f089e52b01006f1a5",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 236,
"avg_line_length": 34.44642857142857,
"alnum_prop": 0.5393986521513737,
"repo_name": "BiohZn/Cardinal",
"id": "8f7355cbcba8521f4b396f6133563d16f82d93b6",
"size": "3881",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/urls/plugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "136515"
}
],
"symlink_target": ""
} |
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..client.configuration import Configuration
from ..client.api_client import ApiClient
class EmailArchiveApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
    def __init__(self, api_client=None):
        # Use the supplied client, or fall back to (and lazily create) the
        # shared client held by the global Configuration singleton.
        config = Configuration()
        if api_client:
            self.api_client = api_client
        else:
            if not config.api_client:
                config.api_client = ApiClient()
            self.api_client = config.api_client
    def create_bcc_email_archive(self, account_id, **kwargs):
        """
        Creates a blind carbon copy email archive entry
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.create_bcc_email_archive(account_id, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str account_id: The external account number (int) or account ID Guid. (required)
        :param BccEmailArchive bcc_email_archive:
        :return: BccEmailArchive
            If the method is called asynchronously,
            returns the request thread.
        """
        # Generated dispatcher: with a callback, delegate asynchronously and
        # return the request thread; otherwise return the response data.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('callback'):
            return self.create_bcc_email_archive_with_http_info(account_id, **kwargs)
        else:
            (data) = self.create_bcc_email_archive_with_http_info(account_id, **kwargs)
            return data
    def create_bcc_email_archive_with_http_info(self, account_id, **kwargs):
        """
        Creates a blind carbon copy email archive entry
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.create_bcc_email_archive_with_http_info(account_id, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str account_id: The external account number (int) or account ID Guid. (required)
        :param BccEmailArchive bcc_email_archive:
        :return: BccEmailArchive
            If the method is called asynchronously,
            returns the request thread.
        """
        # Generated request builder: validates kwargs, fills in path/body
        # parameters and issues POST via the shared api_client.
        all_params = ['account_id', 'bcc_email_archive']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_bcc_email_archive" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'account_id' is set
        if ('account_id' not in params) or (params['account_id'] is None):
            raise ValueError("Missing the required parameter `account_id` when calling `create_bcc_email_archive`")
        collection_formats = {}
        resource_path = '/v2.1/accounts/{accountId}/settings/bcc_email_archives'.replace('{format}', 'json')
        path_params = {}
        if 'account_id' in params:
            path_params['accountId'] = params['account_id']
        query_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'bcc_email_archive' in params:
            body_params = params['bcc_email_archive']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # Authentication setting
        auth_settings = []
        return self.api_client.call_api(resource_path, 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='BccEmailArchive',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
    def delete_bcc_email_archive(self, account_id, bcc_email_archive_id, **kwargs):
        """
        Delete a blind carbon copy email archive for an account.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.delete_bcc_email_archive(account_id, bcc_email_archive_id, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str account_id: The external account number (int) or account ID Guid. (required)
        :param str bcc_email_archive_id: (required)
        :return: None
            If the method is called asynchronously,
            returns the request thread.
        """
        # Generated dispatcher: with a callback, delegate asynchronously and
        # return the request thread; otherwise return the response data.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('callback'):
            return self.delete_bcc_email_archive_with_http_info(account_id, bcc_email_archive_id, **kwargs)
        else:
            (data) = self.delete_bcc_email_archive_with_http_info(account_id, bcc_email_archive_id, **kwargs)
            return data
    def delete_bcc_email_archive_with_http_info(self, account_id, bcc_email_archive_id, **kwargs):
        """
        Delete a blind carbon copy email archive for an account.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.delete_bcc_email_archive_with_http_info(account_id, bcc_email_archive_id, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str account_id: The external account number (int) or account ID Guid. (required)
        :param str bcc_email_archive_id: (required)
        :return: None
            If the method is called asynchronously,
            returns the request thread.
        """
        # Generated request builder: validates kwargs, fills in path
        # parameters and issues DELETE via the shared api_client.
        all_params = ['account_id', 'bcc_email_archive_id']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_bcc_email_archive" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'account_id' is set
        if ('account_id' not in params) or (params['account_id'] is None):
            raise ValueError("Missing the required parameter `account_id` when calling `delete_bcc_email_archive`")
        # verify the required parameter 'bcc_email_archive_id' is set
        if ('bcc_email_archive_id' not in params) or (params['bcc_email_archive_id'] is None):
            raise ValueError("Missing the required parameter `bcc_email_archive_id` when calling `delete_bcc_email_archive`")
        collection_formats = {}
        resource_path = '/v2.1/accounts/{accountId}/settings/bcc_email_archives/{bccEmailArchiveId}'.replace('{format}', 'json')
        path_params = {}
        if 'account_id' in params:
            path_params['accountId'] = params['account_id']
        if 'bcc_email_archive_id' in params:
            path_params['bccEmailArchiveId'] = params['bcc_email_archive_id']
        query_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # Authentication setting
        auth_settings = []
        return self.api_client.call_api(resource_path, 'DELETE',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type=None,
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_bcc_email_archive_history_list(self, account_id, bcc_email_archive_id, **kwargs):
    """
    Get the blind carbon copy email archive history entries for the specified archive

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_bcc_email_archive_history_list(account_id, bcc_email_archive_id, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str account_id: The external account number (int) or account ID Guid. (required)
    :param str bcc_email_archive_id: (required)
    :param str count:
    :param str start_position:
    :return: BccEmailArchiveHistoryList
        If the method is called asynchronously,
        returns the request thread.
    """
    # Ask the low-level helper for the deserialized body only, not the
    # full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    if not kwargs.get('callback'):
        # Synchronous path: unwrap and return the response data.
        data = self.get_bcc_email_archive_history_list_with_http_info(
            account_id, bcc_email_archive_id, **kwargs
        )
        return data
    # Asynchronous path: hand back the request thread started by the client.
    return self.get_bcc_email_archive_history_list_with_http_info(
        account_id, bcc_email_archive_id, **kwargs
    )
def get_bcc_email_archive_history_list_with_http_info(self, account_id, bcc_email_archive_id, **kwargs):
    """
    Get the blind carbon copy email archive history entries for the specified archive

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_bcc_email_archive_history_list_with_http_info(account_id, bcc_email_archive_id, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str account_id: The external account number (int) or account ID Guid. (required)
    :param str bcc_email_archive_id: (required)
    :param str count:
    :param str start_position:
    :return: BccEmailArchiveHistoryList
        If the method is called asynchronously,
        returns the request thread.
    """
    # Names this endpoint understands, plus the generic client options
    # every generated API method accepts.
    all_params = ['account_id', 'bcc_email_archive_id', 'count', 'start_position']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: locals() is captured here on purpose — positional arguments are
    # already present under their own names, and recognised kwargs are
    # folded in below. Do not rename locals above this line.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_bcc_email_archive_history_list" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'account_id' is set
    if ('account_id' not in params) or (params['account_id'] is None):
        raise ValueError("Missing the required parameter `account_id` when calling `get_bcc_email_archive_history_list`")
    # verify the required parameter 'bcc_email_archive_id' is set
    if ('bcc_email_archive_id' not in params) or (params['bcc_email_archive_id'] is None):
        raise ValueError("Missing the required parameter `bcc_email_archive_id` when calling `get_bcc_email_archive_history_list`")

    collection_formats = {}

    resource_path = '/v2.1/accounts/{accountId}/settings/bcc_email_archives/{bccEmailArchiveId}'.replace('{format}', 'json')
    # Substitute the path template placeholders.
    path_params = {}
    if 'account_id' in params:
        path_params['accountId'] = params['account_id']
    if 'bcc_email_archive_id' in params:
        path_params['bccEmailArchiveId'] = params['bcc_email_archive_id']

    # Optional paging arguments are passed through as query parameters.
    query_params = {}
    if 'count' in params:
        query_params['count'] = params['count']
    if 'start_position' in params:
        query_params['start_position'] = params['start_position']

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])

    # Authentication setting
    auth_settings = []

    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='BccEmailArchiveHistoryList',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_bcc_email_archive_list(self, account_id, **kwargs):
    """
    Get the blind carbon copy email archive entries owned by the specified account

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_bcc_email_archive_list(account_id, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str account_id: The external account number (int) or account ID Guid. (required)
    :param str count:
    :param str start_position:
    :return: BccEmailArchiveList
        If the method is called asynchronously,
        returns the request thread.
    """
    # Ask the low-level helper for the deserialized body only.
    kwargs['_return_http_data_only'] = True
    if not kwargs.get('callback'):
        # Synchronous path: unwrap and return the response data.
        data = self.get_bcc_email_archive_list_with_http_info(account_id, **kwargs)
        return data
    # Asynchronous path: hand back the request thread started by the client.
    return self.get_bcc_email_archive_list_with_http_info(account_id, **kwargs)
def get_bcc_email_archive_list_with_http_info(self, account_id, **kwargs):
    """
    Get the blind carbon copy email archive entries owned by the specified account

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_bcc_email_archive_list_with_http_info(account_id, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str account_id: The external account number (int) or account ID Guid. (required)
    :param str count:
    :param str start_position:
    :return: BccEmailArchiveList
        If the method is called asynchronously,
        returns the request thread.
    """
    # Names this endpoint understands, plus the generic client options
    # every generated API method accepts.
    all_params = ['account_id', 'count', 'start_position']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: locals() is captured here on purpose — positional arguments are
    # already present under their own names, and recognised kwargs are
    # folded in below. Do not rename locals above this line.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_bcc_email_archive_list" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'account_id' is set
    if ('account_id' not in params) or (params['account_id'] is None):
        raise ValueError("Missing the required parameter `account_id` when calling `get_bcc_email_archive_list`")

    collection_formats = {}

    resource_path = '/v2.1/accounts/{accountId}/settings/bcc_email_archives'.replace('{format}', 'json')
    # Substitute the path template placeholder.
    path_params = {}
    if 'account_id' in params:
        path_params['accountId'] = params['account_id']

    # Optional paging arguments are passed through as query parameters.
    query_params = {}
    if 'count' in params:
        query_params['count'] = params['count']
    if 'start_position' in params:
        query_params['start_position'] = params['start_position']

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])

    # Authentication setting
    auth_settings = []

    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='BccEmailArchiveList',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
| {
"content_hash": "7ae05cd56f87948e7117feba76abf66e",
"timestamp": "",
"source": "github",
"line_count": 477,
"max_line_length": 140,
"avg_line_length": 44.926624737945495,
"alnum_prop": 0.567942137190854,
"repo_name": "docusign/docusign-python-client",
"id": "b7b3e375cb8c8c61989f12b3c42ef98fe21ed920",
"size": "21447",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docusign_esign/apis/email_archive_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9687716"
}
],
"symlink_target": ""
} |
import imath
import IECore
class V3fVectorCreator( IECore.Op ) :
    """Op that builds a V3fVectorData of a requested size, with every
    element set to the same value."""

    def __init__( self ) :

        # Declare the op's description and its result parameter.
        IECore.Op.__init__(
            self,
            "Op that generates a V3fVectorData object.",
            IECore.ObjectParameter(
                name = "result",
                description = "A V3fVectorData object.",
                defaultValue = IECore.V3fVectorData(),
                type = IECore.TypeId.V3fVectorData
            )
        )

        # Inputs: element count, and the value repeated into every element.
        self.parameters().addParameters(
            [
                IECore.IntParameter(
                    name = "size",
                    description = "The number of elements to put in the result vector.",
                    defaultValue = 1
                ),
                IECore.V3fParameter(
                    name = "value",
                    description = "Value to put in each of the vector elements.",
                    defaultValue = imath.V3f(1)
                )
            ]
        )

    def doOperation( self, args ) :

        # Repeat the requested value `size` times and wrap it as vector data.
        count = args['size'].value
        fill = args['value'].value
        return IECore.V3fVectorData( [ fill ] * count )

IECore.registerRunTimeTyped( V3fVectorCreator )
| {
"content_hash": "ffc00710b5bc22d2218ba1aee8f0896f",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 72,
"avg_line_length": 24.263157894736842,
"alnum_prop": 0.6648590021691974,
"repo_name": "appleseedhq/cortex",
"id": "bb7a6a6e14282dcbf8288f3ab0f55d834641fba1",
"size": "922",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/IECoreHoudini/ops/vectors/V3fVectorCreator/V3fVectorCreator-1.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1374"
},
{
"name": "C",
"bytes": "66503"
},
{
"name": "C++",
"bytes": "9536541"
},
{
"name": "CMake",
"bytes": "95418"
},
{
"name": "GLSL",
"bytes": "24422"
},
{
"name": "Mathematica",
"bytes": "255937"
},
{
"name": "Objective-C",
"bytes": "2360"
},
{
"name": "Python",
"bytes": "4651272"
},
{
"name": "Tcl",
"bytes": "1796"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import sys
import platform
import os
import _pytest._code
from _pytest.debugging import SUPPORTS_BREAKPOINT_BUILTIN
import pytest
_ENVIRON_PYTHONBREAKPOINT = os.environ.get("PYTHONBREAKPOINT", "")
def runpdb_and_get_report(testdir, source):
    """Run *source* in-process under ``--pdb`` and return the call-phase report."""
    test_file = testdir.makepyfile(source)
    run = testdir.runpytest_inprocess("--pdb", test_file)
    logged = run.reprec.getreports("pytest_runtest_logreport")
    # A single test produces exactly setup/call/teardown reports.
    assert len(logged) == 3, logged
    return logged[1]
@pytest.fixture
def custom_pdb_calls():
    """Install a dummy debugger class as ``_pytest._CustomPdb`` and return a
    list that records which of its methods were called."""
    calls = []

    # Every debugger entry point simply records that it was invoked.
    class _CustomPdb(object):
        def __init__(self, *args, **kwargs):
            calls.append("init")

        def reset(self):
            calls.append("reset")

        def interaction(self, *args):
            calls.append("interaction")

    _pytest._CustomPdb = _CustomPdb
    return calls
@pytest.fixture
def custom_debugger_hook():
    """Install a dummy debugger class as ``_pytest._CustomDebugger``, yield a
    list recording its method calls, and remove the class again on teardown."""
    calls = []

    # Every debugger entry point records that it was invoked; set_trace also
    # prints a marker so spawned test runs can be matched against it.
    class _CustomDebugger(object):
        def __init__(self, *args, **kwargs):
            calls.append("init")

        def reset(self):
            calls.append("reset")

        def interaction(self, *args):
            calls.append("interaction")

        def set_trace(self, frame):
            print("**CustomDebugger**")
            calls.append("set_trace")

    _pytest._CustomDebugger = _CustomDebugger
    yield calls
    del _pytest._CustomDebugger
class TestPDB(object):
    """Tests for ``--pdb`` post-mortem entry and ``pytest.set_trace``/
    ``pdb.set_trace`` interception, mostly driven through a spawned pytest
    child process (pexpect-style ``expect``/``send`` interaction)."""

    @pytest.fixture
    def pdblist(self, request):
        # Replace the debugging plugin's post_mortem with a recorder so tests
        # can assert whether (and with which traceback) the debugger fired.
        monkeypatch = request.getfixturevalue("monkeypatch")
        pdblist = []

        def mypdb(*args):
            pdblist.append(args)

        plugin = request.config.pluginmanager.getplugin("debugging")
        monkeypatch.setattr(plugin, "post_mortem", mypdb)
        return pdblist

    def test_pdb_on_fail(self, testdir, pdblist):
        # A plain assertion failure must enter post-mortem exactly once.
        rep = runpdb_and_get_report(
            testdir,
            """
            def test_func():
                assert 0
            """,
        )
        assert rep.failed
        assert len(pdblist) == 1
        tb = _pytest._code.Traceback(pdblist[0][0])
        assert tb[-1].name == "test_func"

    def test_pdb_on_xfail(self, testdir, pdblist):
        # An expected failure must NOT enter post-mortem.
        rep = runpdb_and_get_report(
            testdir,
            """
            import pytest
            @pytest.mark.xfail
            def test_func():
                assert 0
            """,
        )
        assert "xfail" in rep.keywords
        assert not pdblist

    def test_pdb_on_skip(self, testdir, pdblist):
        # Skips must NOT enter post-mortem.
        rep = runpdb_and_get_report(
            testdir,
            """
            import pytest
            def test_func():
                pytest.skip("hello")
            """,
        )
        assert rep.skipped
        assert len(pdblist) == 0

    def test_pdb_on_BdbQuit(self, testdir, pdblist):
        # Quitting the debugger (BdbQuit) fails the test without re-entering pdb.
        rep = runpdb_and_get_report(
            testdir,
            """
            import bdb
            def test_func():
                raise bdb.BdbQuit
            """,
        )
        assert rep.failed
        assert len(pdblist) == 0

    def test_pdb_on_KeyboardInterrupt(self, testdir, pdblist):
        # A KeyboardInterrupt raised by the test does enter post-mortem.
        rep = runpdb_and_get_report(
            testdir,
            """
            def test_func():
                raise KeyboardInterrupt
            """,
        )
        assert rep.failed
        assert len(pdblist) == 1

    def test_pdb_interaction(self, testdir):
        p1 = testdir.makepyfile(
            """
            def test_1():
                i = 0
                assert i == 1
            """
        )
        child = testdir.spawn_pytest("--pdb %s" % p1)
        child.expect(".*def test_1")
        child.expect(".*i = 0")
        child.expect("Pdb")
        # EOF quits the prompt; the source listing must not be repeated after.
        child.sendeof()
        rest = child.read().decode("utf8")
        assert "1 failed" in rest
        assert "def test_1" not in rest
        self.flush(child)

    @staticmethod
    def flush(child):
        # Reap the spawned child so it does not linger; skipped on Darwin
        # (NOTE(review): presumably because waiting hangs there — confirm).
        if platform.system() == "Darwin":
            return
        if child.isalive():
            child.wait()

    def test_pdb_unittest_postmortem(self, testdir):
        # Post-mortem must see instance state as it was at failure time,
        # even though tearDown (which clears it) has a chance to run.
        p1 = testdir.makepyfile(
            """
            import unittest
            class Blub(unittest.TestCase):
                def tearDown(self):
                    self.filename = None
                def test_false(self):
                    self.filename = 'debug' + '.me'
                    assert 0
            """
        )
        child = testdir.spawn_pytest("--pdb %s" % p1)
        child.expect("Pdb")
        child.sendline("p self.filename")
        child.sendeof()
        rest = child.read().decode("utf8")
        assert "debug.me" in rest
        self.flush(child)

    def test_pdb_unittest_skip(self, testdir):
        """Test for issue #2137"""
        # A class-level unittest skip must not drop into pdb.
        p1 = testdir.makepyfile(
            """
            import unittest
            @unittest.skipIf(True, 'Skipping also with pdb active')
            class MyTestCase(unittest.TestCase):
                def test_one(self):
                    assert 0
            """
        )
        child = testdir.spawn_pytest("-rs --pdb %s" % p1)
        child.expect("Skipping also with pdb active")
        child.expect("1 skipped in")
        child.sendeof()
        self.flush(child)

    def test_pdb_print_captured_stdout(self, testdir):
        # Captured stdout is shown when entering pdb, but not re-shown after.
        p1 = testdir.makepyfile(
            """
            def test_1():
                print("get\\x20rekt")
                assert False
            """
        )
        child = testdir.spawn_pytest("--pdb %s" % p1)
        child.expect("captured stdout")
        child.expect("get rekt")
        child.expect("Pdb")
        child.sendeof()
        rest = child.read().decode("utf8")
        assert "1 failed" in rest
        assert "get rekt" not in rest
        self.flush(child)

    def test_pdb_print_captured_stderr(self, testdir):
        # Same as above, for captured stderr.
        p1 = testdir.makepyfile(
            """
            def test_1():
                import sys
                sys.stderr.write("get\\x20rekt")
                assert False
            """
        )
        child = testdir.spawn_pytest("--pdb %s" % p1)
        child.expect("captured stderr")
        child.expect("get rekt")
        child.expect("Pdb")
        child.sendeof()
        rest = child.read().decode("utf8")
        assert "1 failed" in rest
        assert "get rekt" not in rest
        self.flush(child)

    def test_pdb_dont_print_empty_captured_stdout_and_stderr(self, testdir):
        # No "captured ..." headers should appear when nothing was captured.
        p1 = testdir.makepyfile(
            """
            def test_1():
                assert False
            """
        )
        child = testdir.spawn_pytest("--pdb %s" % p1)
        child.expect("Pdb")
        output = child.before.decode("utf8")
        child.sendeof()
        assert "captured stdout" not in output
        assert "captured stderr" not in output
        self.flush(child)

    @pytest.mark.parametrize("showcapture", ["all", "no", "log"])
    def test_pdb_print_captured_logs(self, testdir, showcapture):
        # Captured log records are shown for --show-capture=all/log.
        p1 = testdir.makepyfile(
            """
            def test_1():
                import logging
                logging.warn("get " + "rekt")
                assert False
            """
        )
        child = testdir.spawn_pytest(
            "--show-capture={} --pdb {}".format(showcapture, p1)
        )
        if showcapture in ("all", "log"):
            child.expect("captured log")
            child.expect("get rekt")
        child.expect("Pdb")
        child.sendeof()
        rest = child.read().decode("utf8")
        assert "1 failed" in rest
        self.flush(child)

    def test_pdb_print_captured_logs_nologging(self, testdir):
        # With the logging plugin disabled there is no "captured log" section.
        p1 = testdir.makepyfile(
            """
            def test_1():
                import logging
                logging.warn("get " + "rekt")
                assert False
            """
        )
        child = testdir.spawn_pytest("--show-capture=all --pdb -p no:logging %s" % p1)
        child.expect("get rekt")
        output = child.before.decode("utf8")
        assert "captured log" not in output
        child.expect("Pdb")
        child.sendeof()
        rest = child.read().decode("utf8")
        assert "1 failed" in rest
        self.flush(child)

    def test_pdb_interaction_exception(self, testdir):
        # Names from the failing frame (here a module-level function) must be
        # resolvable at the pdb prompt.
        p1 = testdir.makepyfile(
            """
            import pytest
            def globalfunc():
                pass
            def test_1():
                pytest.raises(ValueError, globalfunc)
            """
        )
        child = testdir.spawn_pytest("--pdb %s" % p1)
        child.expect(".*def test_1")
        child.expect(".*pytest.raises.*globalfunc")
        child.expect("Pdb")
        child.sendline("globalfunc")
        child.expect(".*function")
        child.sendeof()
        child.expect("1 failed")
        self.flush(child)

    def test_pdb_interaction_on_collection_issue181(self, testdir):
        # A collection error (NameError at import) must still enter pdb.
        p1 = testdir.makepyfile(
            """
            import pytest
            xxx
            """
        )
        child = testdir.spawn_pytest("--pdb %s" % p1)
        # child.expect(".*import pytest.*")
        child.expect("Pdb")
        child.sendeof()
        child.expect("1 error")
        self.flush(child)

    def test_pdb_interaction_on_internal_error(self, testdir):
        # An INTERNALERROR (here a broken hook in conftest) must enter pdb.
        testdir.makeconftest(
            """
            def pytest_runtest_protocol():
                0/0
            """
        )
        p1 = testdir.makepyfile("def test_func(): pass")
        child = testdir.spawn_pytest("--pdb %s" % p1)
        # child.expect(".*import pytest.*")
        child.expect("Pdb")
        child.sendeof()
        self.flush(child)

    def test_pdb_interaction_capturing_simple(self, testdir):
        # pytest.set_trace() suspends capture; output printed before the
        # breakpoint is reported after the run.
        p1 = testdir.makepyfile(
            """
            import pytest
            def test_1():
                i = 0
                print ("hello17")
                pytest.set_trace()
                x = 3
            """
        )
        child = testdir.spawn_pytest(str(p1))
        child.expect("test_1")
        child.expect("x = 3")
        child.expect("Pdb")
        child.sendeof()
        rest = child.read().decode("utf-8")
        assert "1 failed" in rest
        assert "def test_1" in rest
        assert "hello17" in rest  # out is captured
        self.flush(child)

    def test_pdb_set_trace_interception(self, testdir):
        # A plain pdb.set_trace() is intercepted and works under capture.
        p1 = testdir.makepyfile(
            """
            import pdb
            def test_1():
                pdb.set_trace()
            """
        )
        child = testdir.spawn_pytest(str(p1))
        child.expect("test_1")
        child.expect("Pdb")
        child.sendeof()
        rest = child.read().decode("utf8")
        assert "1 failed" in rest
        assert "reading from stdin while output" not in rest
        self.flush(child)

    def test_pdb_and_capsys(self, testdir):
        # capsys stays usable from inside the pdb prompt.
        p1 = testdir.makepyfile(
            """
            import pytest
            def test_1(capsys):
                print ("hello1")
                pytest.set_trace()
            """
        )
        child = testdir.spawn_pytest(str(p1))
        child.expect("test_1")
        child.send("capsys.readouterr()\n")
        child.expect("hello1")
        child.sendeof()
        child.read()
        self.flush(child)

    def test_pdb_with_caplog_on_pdb_invocation(self, testdir):
        # caplog records are inspectable from the post-mortem prompt.
        p1 = testdir.makepyfile(
            """
            def test_1(capsys, caplog):
                import logging
                logging.getLogger(__name__).warning("some_warning")
                assert 0
            """
        )
        child = testdir.spawn_pytest("--pdb %s" % str(p1))
        child.send("caplog.record_tuples\n")
        child.expect_exact(
            "[('test_pdb_with_caplog_on_pdb_invocation', 30, 'some_warning')]"
        )
        child.sendeof()
        child.read()
        self.flush(child)

    def test_set_trace_capturing_afterwards(self, testdir):
        # Capturing must be restored for tests that run after a set_trace().
        p1 = testdir.makepyfile(
            """
            import pdb
            def test_1():
                pdb.set_trace()
            def test_2():
                print ("hello")
                assert 0
            """
        )
        child = testdir.spawn_pytest(str(p1))
        child.expect("test_1")
        child.send("c\n")
        child.expect("test_2")
        child.expect("Captured")
        child.expect("hello")
        child.sendeof()
        child.read()
        self.flush(child)

    def test_pdb_interaction_doctest(self, testdir):
        # Doctest failures under --pdb expose the doctest's namespace.
        p1 = testdir.makepyfile(
            """
            import pytest
            def function_1():
                '''
                >>> i = 0
                >>> assert i == 1
                '''
            """
        )
        child = testdir.spawn_pytest("--doctest-modules --pdb %s" % p1)
        child.expect("Pdb")
        child.sendline("i")
        child.expect("0")
        child.expect("Pdb")
        child.sendeof()
        rest = child.read().decode("utf8")
        assert "1 failed" in rest
        self.flush(child)

    def test_pdb_interaction_capturing_twice(self, testdir):
        # Two consecutive set_trace() calls both interact and both rounds of
        # output are captured.
        p1 = testdir.makepyfile(
            """
            import pytest
            def test_1():
                i = 0
                print ("hello17")
                pytest.set_trace()
                x = 3
                print ("hello18")
                pytest.set_trace()
                x = 4
            """
        )
        child = testdir.spawn_pytest(str(p1))
        child.expect("test_1")
        child.expect("x = 3")
        child.expect("Pdb")
        child.sendline("c")
        child.expect("x = 4")
        child.expect("Pdb")
        child.sendeof()
        rest = child.read().decode("utf8")
        assert "1 failed" in rest
        assert "def test_1" in rest
        assert "hello17" in rest  # out is captured
        assert "hello18" in rest  # out is captured
        self.flush(child)

    def test_pdb_used_outside_test(self, testdir):
        # pytest.set_trace() also works in a plain python script.
        p1 = testdir.makepyfile(
            """
            import pytest
            pytest.set_trace()
            x = 5
            """
        )
        child = testdir.spawn("{} {}".format(sys.executable, p1))
        child.expect("x = 5")
        child.expect("Pdb")
        child.sendeof()
        self.flush(child)

    def test_pdb_used_in_generate_tests(self, testdir):
        # set_trace() works during collection (pytest_generate_tests).
        p1 = testdir.makepyfile(
            """
            import pytest
            def pytest_generate_tests(metafunc):
                pytest.set_trace()
                x = 5
            def test_foo(a):
                pass
            """
        )
        child = testdir.spawn_pytest(str(p1))
        child.expect("x = 5")
        child.expect("Pdb")
        child.sendeof()
        self.flush(child)

    def test_pdb_collection_failure_is_shown(self, testdir):
        # The collection error itself must appear in terminal output.
        p1 = testdir.makepyfile("xxx")
        result = testdir.runpytest_subprocess("--pdb", p1)
        result.stdout.fnmatch_lines(["*NameError*xxx*", "*1 error*"])

    def test_enter_pdb_hook_is_called(self, testdir):
        # pytest_enter_pdb must fire with the fully configured config object.
        testdir.makeconftest(
            """
            def pytest_enter_pdb(config):
                assert config.testing_verification == 'configured'
                print 'enter_pdb_hook'

            def pytest_configure(config):
                config.testing_verification = 'configured'
            """
        )
        p1 = testdir.makepyfile(
            """
            import pytest

            def test_foo():
                pytest.set_trace()
            """
        )
        child = testdir.spawn_pytest(str(p1))
        child.expect("enter_pdb_hook")
        child.send("c\n")
        child.sendeof()
        self.flush(child)

    def test_pdb_custom_cls(self, testdir, custom_pdb_calls):
        # --pdbcls routes post-mortem through the custom debugger class.
        p1 = testdir.makepyfile("""xxx """)
        result = testdir.runpytest_inprocess("--pdb", "--pdbcls=_pytest:_CustomPdb", p1)
        result.stdout.fnmatch_lines(["*NameError*xxx*", "*1 error*"])
        assert custom_pdb_calls == ["init", "reset", "interaction"]

    def test_pdb_custom_cls_without_pdb(self, testdir, custom_pdb_calls):
        # Without --pdb the custom class must never be instantiated.
        p1 = testdir.makepyfile("""xxx """)
        result = testdir.runpytest_inprocess("--pdbcls=_pytest:_CustomPdb", p1)
        result.stdout.fnmatch_lines(["*NameError*xxx*", "*1 error*"])
        assert custom_pdb_calls == []

    def test_pdb_custom_cls_with_settrace(self, testdir, monkeypatch):
        # pytest.set_trace() also honours --pdbcls (class imported via
        # PYTHONPATH in the spawned process).
        testdir.makepyfile(
            custom_pdb="""
            class CustomPdb(object):
                def set_trace(*args, **kwargs):
                    print 'custom set_trace>'
            """
        )
        p1 = testdir.makepyfile(
            """
            import pytest

            def test_foo():
                pytest.set_trace()
            """
        )
        monkeypatch.setenv("PYTHONPATH", str(testdir.tmpdir))
        child = testdir.spawn_pytest("--pdbcls=custom_pdb:CustomPdb %s" % str(p1))

        child.expect("custom set_trace>")
        self.flush(child)
class TestDebuggingBreakpoints(object):
    """Tests for PEP 553 ``breakpoint()`` support: detection of the builtin,
    installation/restoration of ``sys.breakpointhook``, and --pdbcls /
    PYTHONBREAKPOINT interaction."""

    def test_supports_breakpoint_module_global(self):
        """
        Test that supports breakpoint global marks on Python 3.7+ and not on
        CPython 3.5, 2.7
        """
        if sys.version_info.major == 3 and sys.version_info.minor >= 7:
            assert SUPPORTS_BREAKPOINT_BUILTIN is True
        if sys.version_info.major == 3 and sys.version_info.minor == 5:
            assert SUPPORTS_BREAKPOINT_BUILTIN is False
        if sys.version_info.major == 2 and sys.version_info.minor == 7:
            assert SUPPORTS_BREAKPOINT_BUILTIN is False

    @pytest.mark.skipif(
        not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin"
    )
    @pytest.mark.parametrize("arg", ["--pdb", ""])
    def test_sys_breakpointhook_configure_and_unconfigure(self, testdir, arg):
        """
        Test that sys.breakpointhook is set to the custom Pdb class once configured, test that
        hook is reset to system value once pytest has been unconfigured
        """
        testdir.makeconftest(
            """
            import sys
            from pytest import hookimpl
            from _pytest.debugging import pytestPDB

            def pytest_configure(config):
                config._cleanup.append(check_restored)

            def check_restored():
                assert sys.breakpointhook == sys.__breakpointhook__

            def test_check():
                assert sys.breakpointhook == pytestPDB.set_trace
            """
        )
        testdir.makepyfile(
            """
            def test_nothing(): pass
            """
        )
        args = (arg,) if arg else ()
        result = testdir.runpytest_subprocess(*args)
        result.stdout.fnmatch_lines(["*1 passed in *"])

    @pytest.mark.skipif(
        not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin"
    )
    def test_pdb_custom_cls(self, testdir, custom_debugger_hook):
        # breakpoint() under --pdb must route to the --pdbcls debugger.
        p1 = testdir.makepyfile(
            """
            def test_nothing():
                breakpoint()
            """
        )
        result = testdir.runpytest_inprocess(
            "--pdb", "--pdbcls=_pytest:_CustomDebugger", p1
        )
        result.stdout.fnmatch_lines(["*CustomDebugger*", "*1 passed*"])
        assert custom_debugger_hook == ["init", "set_trace"]

    @pytest.mark.parametrize("arg", ["--pdb", ""])
    @pytest.mark.skipif(
        not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin"
    )
    def test_environ_custom_class(self, testdir, custom_debugger_hook, arg):
        # PYTHONBREAKPOINT pointing at a custom class must win over pytest's
        # hook, and the original hook must be restored afterwards.
        testdir.makeconftest(
            """
            import os
            import sys

            os.environ['PYTHONBREAKPOINT'] = '_pytest._CustomDebugger.set_trace'

            def pytest_configure(config):
                config._cleanup.append(check_restored)

            def check_restored():
                assert sys.breakpointhook == sys.__breakpointhook__

            def test_check():
                import _pytest
                assert sys.breakpointhook is _pytest._CustomDebugger.set_trace
            """
        )
        testdir.makepyfile(
            """
            def test_nothing(): pass
            """
        )
        args = (arg,) if arg else ()
        result = testdir.runpytest_subprocess(*args)
        result.stdout.fnmatch_lines(["*1 passed in *"])

    @pytest.mark.skipif(
        not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin"
    )
    @pytest.mark.skipif(
        not _ENVIRON_PYTHONBREAKPOINT == "",
        reason="Requires breakpoint() default value",
    )
    def test_sys_breakpoint_interception(self, testdir):
        # A bare breakpoint() in a test lands at a working Pdb prompt.
        p1 = testdir.makepyfile(
            """
            def test_1():
                breakpoint()
            """
        )
        child = testdir.spawn_pytest(str(p1))
        child.expect("test_1")
        child.expect("Pdb")
        child.sendeof()
        rest = child.read().decode("utf8")
        assert "1 failed" in rest
        assert "reading from stdin while output" not in rest
        TestPDB.flush(child)

    @pytest.mark.skipif(
        not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin"
    )
    def test_pdb_not_altered(self, testdir):
        # Plain pdb.set_trace() keeps working alongside breakpoint() support.
        p1 = testdir.makepyfile(
            """
            import pdb
            def test_1():
                pdb.set_trace()
            """
        )
        child = testdir.spawn_pytest(str(p1))
        child.expect("test_1")
        child.expect("Pdb")
        child.sendeof()
        rest = child.read().decode("utf8")
        assert "1 failed" in rest
        assert "reading from stdin while output" not in rest
        TestPDB.flush(child)
class TestTraceOption:
    """Tests for the ``--trace`` option, which enters pdb at the start of
    each test rather than post-mortem."""

    def test_trace_sets_breakpoint(self, testdir):
        p1 = testdir.makepyfile(
            """
            def test_1():
                assert True
            """
        )
        child = testdir.spawn_pytest("--trace " + str(p1))
        child.expect("test_1")
        child.expect("Pdb")
        # Quitting the prompt lets the (passing) test complete normally.
        child.sendeof()
        rest = child.read().decode("utf8")
        assert "1 passed" in rest
        assert "reading from stdin while output" not in rest
        TestPDB.flush(child)

    def test_trace_against_yield_test(self, testdir):
        # --trace stops inside the yielded sub-test callable as well.
        p1 = testdir.makepyfile(
            """
            def is_equal(a, b):
                assert a == b

            def test_1():
                yield is_equal, 1, 1
            """
        )
        child = testdir.spawn_pytest("--trace " + str(p1))
        child.expect("is_equal")
        child.expect("Pdb")
        child.sendeof()
        rest = child.read().decode("utf8")
        assert "1 passed" in rest
        assert "reading from stdin while output" not in rest
        TestPDB.flush(child)
| {
"content_hash": "2618e5c05bd4b61fda09ff55541637d4",
"timestamp": "",
"source": "github",
"line_count": 756,
"max_line_length": 94,
"avg_line_length": 30.21031746031746,
"alnum_prop": 0.5210823591225535,
"repo_name": "davidszotten/pytest",
"id": "4739f0e2dca4cae723c08bccd945cdea79b8b6c6",
"size": "22839",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testing/test_pdb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1293"
},
{
"name": "Python",
"bytes": "1760297"
}
],
"symlink_target": ""
} |
import tensorflow as tf
from tensorforce import util
from tensorforce.core.explorations import Exploration
class EpsilonAnneal(Exploration):
    """
    Annealing epsilon parameter based on ratio of current timestep to total timesteps.

    Epsilon is interpolated linearly from ``initial_epsilon`` down to
    ``final_epsilon`` over ``timesteps`` steps, beginning at
    ``start_timestep``; outside that window the boundary value is used.
    """

    def __init__(
        self,
        initial_epsilon=1.0,
        final_epsilon=0.1,
        timesteps=10000,
        start_timestep=0,
        scope='epsilon_anneal',
        summary_labels=()
    ):
        # initial_epsilon: value used before/at the start of annealing.
        self.initial_epsilon = initial_epsilon
        # final_epsilon: value used once annealing has completed.
        self.final_epsilon = final_epsilon
        # timesteps: length of the annealing window.
        self.timesteps = timesteps
        # start_timestep: timestep at which annealing begins.
        self.start_timestep = start_timestep

        super(EpsilonAnneal, self).__init__(scope=scope, summary_labels=summary_labels)

    def tf_explore(self, episode, timestep, action_spec=None):
        # Builds a graph-mode conditional: outside the annealing window pick
        # the appropriate boundary value, inside it interpolate linearly.
        def true_fn():
            # Know if first is not true second must be true from outer cond check.
            return tf.cond(
                pred=(timestep < self.start_timestep),
                true_fn=(lambda: self.initial_epsilon),
                false_fn=(lambda: self.final_epsilon)
            )

        def false_fn():
            # Fraction of the annealing window elapsed, in [0, 1].
            completed_ratio = (tf.cast(x=timestep, dtype=util.tf_dtype('float')) - self.start_timestep) / self.timesteps
            return self.initial_epsilon + completed_ratio * (self.final_epsilon - self.initial_epsilon)

        # True when the current timestep lies outside the annealing window.
        pred = tf.logical_or(x=(timestep < self.start_timestep), y=(timestep > self.start_timestep + self.timesteps))
        return tf.cond(pred=pred, true_fn=true_fn, false_fn=false_fn)
| {
"content_hash": "7e40e5fcc03316df8683ce3e60558489",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 120,
"avg_line_length": 35.53488372093023,
"alnum_prop": 0.6295811518324608,
"repo_name": "lefnire/tensorforce",
"id": "9975c8f30bb9e78fb8d12717ce250b3c510791b8",
"size": "2208",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorforce/core/explorations/epsilon_anneal.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "753422"
}
],
"symlink_target": ""
} |
import json
import uuid
from openstackclient.tests.functional.network.v2 import common
class TestMeterRule(common.NetworkTests):
    """Functional tests for meter rule"""

    # Shared network meter created once per class; rules are attached to it.
    METER_ID = None
    METER_RULE_ID = None

    @classmethod
    def setUpClass(cls):
        common.NetworkTests.setUpClass()
        if cls.haz_network:
            # Create one meter for all tests in this class to attach rules to.
            cls.METER_NAME = uuid.uuid4().hex
            json_output = json.loads(cls.openstack(
                'network meter create -f json ' +
                cls.METER_NAME
            ))
            cls.METER_ID = json_output.get('id')

    @classmethod
    def tearDownClass(cls):
        try:
            if cls.haz_network:
                # Delete the shared meter; `meter delete` prints nothing on success.
                raw_output = cls.openstack(
                    'network meter delete ' +
                    cls.METER_ID
                )
                cls.assertOutput('', raw_output)
        finally:
            common.NetworkTests.tearDownClass()

    def setUp(self):
        super(TestMeterRule, self).setUp()
        # Nothing in this class works with Nova Network
        if not self.haz_network:
            self.skipTest("No Network service present")

    def test_meter_rule_delete(self):
        """test create, delete"""
        json_output = json.loads(self.openstack(
            'network meter rule create -f json ' +
            '--remote-ip-prefix 10.0.0.0/8 ' +
            self.METER_ID
        ))
        rule_id = json_output.get('id')
        re_ip = json_output.get('remote_ip_prefix')
        # Clean the rule up even if the assertions below fail.
        self.addCleanup(
            self.openstack,
            'network meter rule delete ' + rule_id
        )
        self.assertIsNotNone(re_ip)
        self.assertIsNotNone(rule_id)
        self.assertEqual(
            '10.0.0.0/8', re_ip
        )

    def test_meter_rule_list(self):
        """Test create, list, delete"""
        # Create two rules with distinct prefixes, then check both show up
        # in the list output.
        json_output = json.loads(self.openstack(
            'network meter rule create -f json ' +
            '--remote-ip-prefix 10.0.0.0/8 ' +
            self.METER_ID
        ))
        rule_id_1 = json_output.get('id')
        self.addCleanup(
            self.openstack,
            'network meter rule delete ' + rule_id_1
        )
        self.assertEqual(
            '10.0.0.0/8',
            json_output.get('remote_ip_prefix')
        )

        json_output_1 = json.loads(self.openstack(
            'network meter rule create -f json ' +
            '--remote-ip-prefix 11.0.0.0/8 ' +
            self.METER_ID
        ))
        rule_id_2 = json_output_1.get('id')
        self.addCleanup(
            self.openstack,
            'network meter rule delete ' + rule_id_2
        )
        self.assertEqual(
            '11.0.0.0/8',
            json_output_1.get('remote_ip_prefix')
        )

        json_output = json.loads(self.openstack(
            'network meter rule list -f json'
        ))
        rule_id_list = [item.get('ID') for item in json_output]
        ip_prefix_list = [item.get('Remote IP Prefix') for item in json_output]
        self.assertIn(rule_id_1, rule_id_list)
        self.assertIn(rule_id_2, rule_id_list)
        self.assertIn('10.0.0.0/8', ip_prefix_list)
        self.assertIn('11.0.0.0/8', ip_prefix_list)

    def test_meter_rule_show(self):
        """Test create, show, delete"""
        json_output = json.loads(self.openstack(
            'network meter rule create -f json ' +
            '--remote-ip-prefix 10.0.0.0/8 ' +
            '--egress ' +
            self.METER_ID
        ))
        rule_id = json_output.get('id')
        # --egress must be reflected in the created rule's direction.
        self.assertEqual(
            'egress',
            json_output.get('direction')
        )

        json_output = json.loads(self.openstack(
            'network meter rule show -f json ' + rule_id
        ))
        self.assertEqual(
            '10.0.0.0/8',
            json_output.get('remote_ip_prefix')
        )
        self.assertIsNotNone(rule_id)

        self.addCleanup(
            self.openstack,
            'network meter rule delete ' + rule_id
        )
| {
"content_hash": "9f815c9043fccfd64c96b0f4706efc39",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 79,
"avg_line_length": 30.174242424242426,
"alnum_prop": 0.5247301029374843,
"repo_name": "openstack/python-openstackclient",
"id": "31bc08453b3c3813968b1b88ee1559e85669ad12",
"size": "4622",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstackclient/tests/functional/network/v2/test_network_meter_rule.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "923"
},
{
"name": "Python",
"bytes": "5016301"
},
{
"name": "Shell",
"bytes": "299"
}
],
"symlink_target": ""
} |
# Bootstrap setuptools via ez_setup when it is not already installed.
try:
    from setuptools import setup, find_packages
except ImportError:
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup, find_packages  # noqa
setup(
    name='pecan-wtforms',
    version='0.1.0',
    # NOTE(review): description is an empty multi-line string — presumably a
    # placeholder; consider filling it in before publishing.
    description="""
""",
    long_description=None,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: BSD License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Programming Language :: Python'
    ],
    keywords='',
    url='',
    author='Ryan Petrello',
    author_email='ryan (at) ryanpetrello.com',
    license='MIT',
    install_requires=['pecan', 'wtforms'],
    tests_require=['WebTest >= 1.3.1'],  # py3 compat
    test_suite='pecan_wtforms.tests',
    zip_safe=False,
    packages=find_packages(exclude=['ez_setup']),
    # Register the package as a pecan extension.
    entry_points="""
    [pecan.extension]
    wtforms = pecan_wtforms
    """
)
| {
"content_hash": "4122b17026f2c4775f832d63e0b07323",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 55,
"avg_line_length": 29.342105263157894,
"alnum_prop": 0.6134529147982063,
"repo_name": "ryanpetrello/pecan-wtforms",
"id": "330d3e340546d1292dae0823c6e3f20c366c506a",
"size": "1139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "262"
},
{
"name": "Python",
"bytes": "49067"
}
],
"symlink_target": ""
} |
# Maps a head-model type code to the 2da column that tracks its ids;
# populated from head_renamer.ini by load_2da_lookup().
TDA_LOOKUP_COLUMN = {}

import re, sys, configparser
import os
from os import listdir
from os.path import isfile, join
import glob
import argparse
import shutil
from pynwn.file.twoda import TwoDA

# Head model file names look like p<type><nn>_head<NNN>.mdl; groups are
# (type, "<nn>_head", id).
MDL_NAME_REGEX = re.compile(r'p(\D\D)(\d+_head)(\d+).mdl', flags=re.IGNORECASE)
PLT_NAME_REGEX = re.compile(r'(.+)_head(\d+).plt', flags=re.IGNORECASE)

parser = argparse.ArgumentParser()
parser.add_argument('-v', '--version', action='version', version='0.1')
parser.add_argument('-t', '--twoda', help='2da head lookup table. Default: headmodel.2da', default='headmodel.2da')
parser.add_argument('--use-nontoolset', help='Flag to use head model IDs that cannot be used in the toolset.',
                    default=False, action='store_true')
parser.add_argument('output', help='Output directory.')
parser.add_argument('input', help='Input directories', nargs='+')
# NOTE(review): arguments are parsed at import time, so importing this file
# as a module consumes sys.argv; acceptable for a standalone script.
args = parser.parse_args()
def load_2da_lookup(config, output, tda):
    """Fill TDA_LOOKUP_COLUMN from the [Lookup] config section and load the 2da.

    Returns a TwoDA instance, or None (with a message on stderr) when the
    table file does not exist in the output directory.
    """
    global TDA_LOOKUP_COLUMN
    table_path = join(output, tda)
    if not isfile(table_path):
        print("Unable to load lookup table: %s" % table_path, file=sys.stderr)
        return None
    for option in config.options('Lookup'):
        TDA_LOOKUP_COLUMN[option] = config.get('Lookup', option)
    return TwoDA(table_path)
def update_mdl(dir, mdl, new_id):
    """Copy a head .mdl into args.output under a new id and bring its PLTs along.

    Rewrites every 'headNNN' occurrence inside the model text to the new id,
    then copies each matching PLT texture file, renamed to the new id.
    """
    base = os.path.basename(mdl).lower()
    match = MDL_NAME_REGEX.match(base)
    if not match:
        return
    mtype, stuff, mid = match.groups()
    old_ref = 'head' + str(mid).zfill(3)
    new_ref = 'head' + str(new_id).zfill(3)
    new_name = 'p' + mtype + stuff + str(new_id).zfill(3) + ".mdl"
    with open(join(args.output, new_name), 'w') as dst:
        with open(mdl, 'r') as src:
            print("Creating new mdl: %s -> %s" % (mdl, new_name))
            for line in src:
                dst.write(line.replace(old_ref, new_ref))
    # PLTs for this head share the '_headNNN' suffix in the source dir.
    pattern = join(dir, 'p' + mtype + '*_head' + str(mid).zfill(3) + '.plt')
    for plt in glob.glob(pattern):
        new_plt = 'p' + mtype + stuff + '%s.plt' % str(new_id).zfill(3)
        print("Copying PLT: %s -> %s" % (plt, new_plt))
        shutil.copy2(plt, join(args.output, new_plt))
def getScriptPath():
    """Return the absolute directory containing the running script."""
    script = os.path.realpath(sys.argv[0])
    return os.path.dirname(script)
def get_next_row(mtype, tda, skip):
    """Find the first free row index for *mtype* in the 2da, claim it, return it.

    A row is "free" when the lookup column does not already equal its own
    index. When *skip* is true, rows 50-99 (ids unusable in the toolset)
    are never handed out. If the table is full, a blank row is appended.
    """
    label = TDA_LOOKUP_COLUMN[mtype]
    col = tda.get_column_index(label) - 1
    for row_idx, _row in enumerate(tda.rows):
        if skip and 50 <= row_idx <= 99:
            continue
        if tda.get_int(row_idx, col) != row_idx:
            tda.set(row_idx, col, str(row_idx))
            return row_idx
    # No free slot found: grow the table by one blank row and claim it.
    tda.rows.append(['****'] * len(tda.rows[0]))
    row_idx = len(tda.rows) - 1
    tda.set(row_idx, col, str(row_idx))
    return row_idx
def run(inputs, skip, output):
    """Renumber every head model file found in each input directory.

    NOTE(review): `output` is unused here — update_mdl() writes to the
    module-level args.output directly, and `tda` below is the module-level
    table loaded in the __main__ block. Consider passing both explicitly.
    """
    for dir in inputs:
        for f in listdir(dir):
            p = join(dir, f)
            if not isfile(p): continue
            # Only files matching the head-model naming pattern are handled.
            match = MDL_NAME_REGEX.match(f)
            if not match: continue
            mtype, _, mid = match.groups()
            # Claim the next free id for this model type, then rewrite/copy.
            nextid = get_next_row(mtype, tda, skip)
            update_mdl(dir, p, nextid)
if __name__ == "__main__":
    # The lookup configuration lives next to the script itself.
    config = configparser.ConfigParser()
    config.read(join(getScriptPath(), 'head_renamer.ini'))
    tda = load_2da_lookup(config, args.output, args.twoda)
    if not tda is None:
        run(args.input, not args.use_nontoolset, args.output)
        print("Updating 2da: " + join(args.output, args.twoda + '.new'))
        # Write the updated table beside the original, with a .new suffix,
        # so the existing 2da is never clobbered.
        with open(join(args.output, args.twoda + '.new'), 'w') as f:
            f.write(str(tda))
    else:
        print("Error: Unable to load lookup table!", file=sys.stderr)
| {
"content_hash": "a6a470ccb86a0ee55a5e9cfc455e31f2",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 115,
"avg_line_length": 36.9375,
"alnum_prop": 0.5947546531302876,
"repo_name": "jd28/pynwn-tools",
"id": "7cd81bf297ca3b49fb4e02d76ebee423505e70ae",
"size": "3569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "head_renamer/head_renamer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "65915"
},
{
"name": "Shell",
"bytes": "356"
}
],
"symlink_target": ""
} |
import asyncio
import hashlib
import os
from .connection import ObfuscatedConnection
from .tcpabridged import AbridgedPacketCodec
from .tcpintermediate import (
IntermediatePacketCodec,
RandomizedIntermediatePacketCodec
)
from ...crypto import AESModeCTR
class MTProxyIO:
    """
    It's very similar to tcpobfuscated.ObfuscatedIO, but the way
    encryption keys, protocol tag and dc_id are encoded is different.
    """
    header = None

    def __init__(self, connection):
        # Borrow the raw asyncio streams from the owning connection.
        self._reader = connection._reader
        self._writer = connection._writer

        (self.header,
         self._encrypt,
         self._decrypt) = self.init_header(
            connection._secret, connection._dc_id, connection.packet_codec)

    @staticmethod
    def init_header(secret, dc_id, packet_codec):
        """Build the 64-byte obfuscation header and the AES-CTR pair.

        Returns ``(header_bytes, encryptor, decryptor)`` where the keys and
        IVs are derived from the random header and the proxy secret.
        Raises ValueError for malformed secrets.
        """
        # Validate: a 17-byte secret starting with 0xDD is a "dd-secret"
        # (padded mode) and requires the randomized-padding codec.
        is_dd = (len(secret) == 17) and (secret[0] == 0xDD)
        is_rand_codec = issubclass(
            packet_codec, RandomizedIntermediatePacketCodec)
        if is_dd and not is_rand_codec:
            raise ValueError(
                "Only RandomizedIntermediate can be used with dd-secrets")
        secret = secret[1:] if is_dd else secret
        if len(secret) != 16:
            raise ValueError(
                "MTProxy secret must be a hex-string representing 16 bytes")

        # Obfuscated messages secrets cannot start with any of these
        keywords = (b'PVrG', b'GET ', b'POST', b'\xee\xee\xee\xee')
        while True:
            random = os.urandom(64)
            # BUG FIX: this check previously read random[4:4] — an empty
            # slice that can never equal b'\0\0\0\0' — so headers whose
            # second int was all zero were never rejected. The transport
            # rules require rejecting a zero second int: compare bytes 4..8.
            if (random[0] != 0xef and
                    random[:4] not in keywords and
                    random[4:8] != b'\0\0\0\0'):
                break

        random = bytearray(random)
        random_reversed = random[55:7:-1]  # Reversed (8, len=48)

        # Encryption has "continuous buffer" enabled
        encrypt_key = hashlib.sha256(
            bytes(random[8:40]) + secret).digest()
        encrypt_iv = bytes(random[40:56])

        decrypt_key = hashlib.sha256(
            bytes(random_reversed[:32]) + secret).digest()
        decrypt_iv = bytes(random_reversed[32:48])

        encryptor = AESModeCTR(encrypt_key, encrypt_iv)
        decryptor = AESModeCTR(decrypt_key, decrypt_iv)

        # Bytes 56-60 carry the codec's protocol tag and bytes 60-62 the
        # target DC id; the tail is then encrypted in place so the proxy
        # can authenticate the header.
        random[56:60] = packet_codec.obfuscate_tag

        dc_id_bytes = dc_id.to_bytes(2, "little", signed=True)
        random = random[:60] + dc_id_bytes + random[62:]

        random[56:64] = encryptor.encrypt(bytes(random))[56:64]
        return (random, encryptor, decryptor)

    async def readexactly(self, n):
        # Incoming data is obfuscated; CTR keystream application is its own
        # inverse, hence calling encrypt() on the decryptor stream.
        return self._decrypt.encrypt(await self._reader.readexactly(n))

    def write(self, data):
        self._writer.write(self._encrypt.encrypt(data))
class TcpMTProxy(ObfuscatedConnection):
    """
    Connector which allows user to connect to the Telegram via proxy servers
    commonly known as MTProxy.
    Implemented very ugly due to the leaky abstractions in Telethon networking
    classes that should be refactored later (TODO).
    .. warning::
        The support for TcpMTProxy classes is **EXPERIMENTAL** and prone to
        be changed. You shouldn't be using this class yet.
    """
    # Subclasses choose the framing codec; MTProxyIO handles obfuscation.
    packet_codec = None
    obfuscated_io = MTProxyIO

    # noinspection PyUnusedLocal
    def __init__(self, ip, port, dc_id, *, loggers, proxy=None, local_addr=None):
        # connect to proxy's host and port instead of telegram's ones
        proxy_host, proxy_port = self.address_info(proxy)
        # proxy[2] is the hex-encoded MTProxy secret string.
        self._secret = bytes.fromhex(proxy[2])
        # NOTE(review): ip/port/local_addr are intentionally unused — the
        # proxy address replaces Telegram's (see noinspection above).
        super().__init__(
            proxy_host, proxy_port, dc_id, loggers=loggers)

    async def _connect(self, timeout=None, ssl=None):
        await super()._connect(timeout=timeout, ssl=ssl)

        # Wait for EOF for 2 seconds (or if _wait_for_data's definition
        # is missing or different, just sleep for 2 seconds). This way
        # we give the proxy a chance to close the connection if the current
        # codec (which the proxy detects with the data we sent) cannot
        # be used for this proxy. This is a work around for #1134.
        # TODO Sleeping for N seconds may not be the best solution
        # TODO This fix could be welcome for HTTP proxies as well
        try:
            await asyncio.wait_for(self._reader._wait_for_data('proxy'), 2)
        except asyncio.TimeoutError:
            # No EOF within 2s: the proxy accepted our codec — proceed.
            pass
        except Exception:
            # _wait_for_data is a private asyncio API; fall back to a plain
            # sleep if it is unavailable or behaves differently.
            await asyncio.sleep(2)

        if self._reader.at_eof():
            await self.disconnect()
            raise ConnectionError(
                'Proxy closed the connection after sending initial payload')

    @staticmethod
    def address_info(proxy_info):
        # Returns (host, port) from the 3-tuple (host, port, secret).
        if proxy_info is None:
            raise ValueError("No proxy info specified for MTProxy connection")
        return proxy_info[:2]
class ConnectionTcpMTProxyAbridged(TcpMTProxy):
    """
    Connect to proxy using abridged protocol
    """
    # Abridged framing: the lightest MTProto transport packet codec.
    packet_codec = AbridgedPacketCodec
class ConnectionTcpMTProxyIntermediate(TcpMTProxy):
    """
    Connect to proxy using intermediate protocol
    """
    # Intermediate framing: 4-byte little-endian length prefix per packet.
    packet_codec = IntermediatePacketCodec
class ConnectionTcpMTProxyRandomizedIntermediate(TcpMTProxy):
    """
    Connect to proxy using randomized intermediate protocol (dd-secrets)
    """
    # Required codec for 17-byte "dd" secrets (padded intermediate mode).
    packet_codec = RandomizedIntermediatePacketCodec
| {
"content_hash": "28b21a86edb8ffd7be657bfc46d00d4d",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 81,
"avg_line_length": 34.73026315789474,
"alnum_prop": 0.6395150596703921,
"repo_name": "LonamiWebs/Telethon",
"id": "69a43bce9916d17a931fed41a16c6792e979090a",
"size": "5279",
"binary": false,
"copies": "1",
"ref": "refs/heads/v1",
"path": "telethon/network/connection/tcpmtproxy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "776"
},
{
"name": "CSS",
"bytes": "9611"
},
{
"name": "HTML",
"bytes": "8839"
},
{
"name": "JavaScript",
"bytes": "7489"
},
{
"name": "Makefile",
"bytes": "605"
},
{
"name": "Python",
"bytes": "1091881"
},
{
"name": "Shell",
"bytes": "352"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages

DESCRIPTION = "FFXIV Lodestone Scraper"

# Long description and the dependency list are read from files shipped
# next to setup.py.
with open('README.md') as f:
    LONG_DESCRIPTION = f.read()
with open('requirements.txt') as f:
    required = f.read().splitlines()

VERSION = '0.1.18'

CLASSIFIERS = [
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Topic :: Software Development :: Libraries :: Python Modules']

setup(name='ffxivscraper',
      version=VERSION,
      packages=find_packages(),
      install_requires=required,
      # Command-line entry point installed alongside the package.
      scripts=['lodestoner'],
      author='Stanislav Vishnevskiy',
      author_email='vishnevskiy@gmail.com',
      maintainer='Matthew Scragg',
      maintainer_email='scragg@gmail.com',
      url='https://github.com/scragg0x/FFXIV-Scraper',
      license='MIT',
      include_package_data=True,
      description=DESCRIPTION,
      long_description=LONG_DESCRIPTION,
      platforms=['any'],
      classifiers=CLASSIFIERS)
| {
"content_hash": "0de58114d206b3698cd74334cba6c33f",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 67,
"avg_line_length": 29.542857142857144,
"alnum_prop": 0.6673114119922631,
"repo_name": "scragg0x/FFXIV-Scraper",
"id": "2cfe1b9a8b06f3820d02ba9d786194b3b7dcd4f3",
"size": "1034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21831"
}
],
"symlink_target": ""
} |
import warnings
from collections import defaultdict

# Prefer lxml for speed, then the C-accelerated stdlib ElementTree, then
# the pure-Python implementation.
try:
    from lxml import etree
except ImportError:
    try:
        import xml.etree.cElementTree as etree
    except ImportError:
        try:
            import xml.etree.ElementTree as etree
        except ImportError:
            # NOTE(review): this only prints — `etree` stays undefined and
            # the module will fail later with a NameError; consider raising.
            print("Failed to import ElementTree from any known place")
class PomdpXReader:
    """
    Class for reading PomdpX file format from files or strings.

    Reference
    ---------
    http://bigbird.comp.nus.edu.sg/pmwiki/farm/appl/index.php?n=Main.PomdpXDocumentation
    """
    def __init__(self, path=None, string=None):
        """
        Initialize an instance of PomdpX reader class.

        Parameters
        ----------
        path : file or str
            Path of the file containing PomdpX information.
        string : str
            String containing PomdpX information.

        Raises
        ------
        ValueError
            If neither ``path`` nor ``string`` is given.

        Example
        -------
        reader = PomdpXReader('TestPomdpX.xml')
        """
        if path:
            self.network = etree.ElementTree(file=path).getroot()
        elif string:
            self.network = etree.fromstring(string)
        else:
            raise ValueError("Must specify either path or string")

    def get_description(self):
        """
        Return the problem description text.

        Example
        -------
        >>> reader = PomdpXReader('Test_PomdpX.xml')
        >>> reader.get_description()
        'RockSample problem for map size 1 x 3.'
        """
        return self.network.find('Description').text

    def get_discount(self):
        """
        Return the discount factor for the problem as a float.

        Example
        -------
        >>> reader = PomdpXReader('Test_PomdpX.xml')
        >>> reader.get_discount()
        0.95
        """
        return float(self.network.find('Discount').text)

    def get_variables(self):
        """
        Return the variables of the network grouped by tag.

        Returns a dict with keys 'StateVar', 'ObsVar', 'ActionVar' and
        'RewardVar', each mapping to a list of per-variable dicts (see the
        PomdpX documentation for the per-tag attributes).
        """
        self.variables = defaultdict(list)
        for variable in self.network.findall('Variable'):
            _variables = defaultdict(list)
            for var in variable.findall('StateVar'):
                state_variables = defaultdict(list)
                state_variables['vnamePrev'] = var.get('vnamePrev')
                state_variables['vnameCurr'] = var.get('vnameCurr')
                # Attribute is either absent or a non-empty string; any
                # non-empty value counts as fully observable.
                if var.get('fullyObs'):
                    state_variables['fullyObs'] = True
                else:
                    state_variables['fullyObs'] = False
                state_variables['ValueEnum'] = []
                # Values come either as a count (NumValues -> s0..s<N-1>)
                # or as an explicit whitespace-separated ValueEnum list;
                # an explicit list takes precedence when both are present.
                if var.find('NumValues') is not None:
                    for i in range(0, int(var.find('NumValues').text)):
                        state_variables['ValueEnum'].append('s' + str(i))
                if var.find('ValueEnum') is not None:
                    state_variables['ValueEnum'] = \
                        var.find('ValueEnum').text.split()
                _variables['StateVar'].append(state_variables)
            for var in variable.findall('ObsVar'):
                obs_variables = defaultdict(list)
                obs_variables['vname'] = var.get('vname')
                obs_variables['ValueEnum'] = \
                    var.find('ValueEnum').text.split()
                _variables['ObsVar'].append(obs_variables)
            for var in variable.findall('ActionVar'):
                action_variables = defaultdict(list)
                action_variables['vname'] = var.get('vname')
                action_variables['ValueEnum'] = \
                    var.find('ValueEnum').text.split()
                _variables['ActionVar'].append(action_variables)
            for var in variable.findall('RewardVar'):
                reward_variables = defaultdict(list)
                reward_variables['vname'] = var.get('vname')
                _variables['RewardVar'].append(reward_variables)
            self.variables.update(_variables)
        return self.variables

    def get_initial_beliefs(self):
        """
        Return the initial belief CondProb entries as a list of dicts.

        Each dict has keys 'Var', 'Parent', 'Type' and 'Parameter'; the
        parameter is a list of entries for table ('TBL') type or a nested
        dict for decision-diagram ('DD') type.
        """
        initial_state_belief = []
        for variable in self.network.findall('InitialStateBelief'):
            for var in variable.findall('CondProb'):
                cond_prob = defaultdict(list)
                cond_prob['Var'] = var.find('Var').text
                cond_prob['Parent'] = var.find('Parent').text.split()
                # A missing type attribute means a table-type parameter.
                if not var.find('Parameter').get('type'):
                    cond_prob['Type'] = 'TBL'
                else:
                    cond_prob['Type'] = var.find('Parameter').get('type')
                cond_prob['Parameter'] = self.get_parameter(var)
                initial_state_belief.append(cond_prob)
        return initial_state_belief

    def get_state_transition_function(self):
        """
        Return the state-transition CondProb entries as a list of dicts
        (same structure as get_initial_beliefs()).
        """
        state_transition_function = []
        for variable in self.network.findall('StateTransitionFunction'):
            for var in variable.findall('CondProb'):
                cond_prob = defaultdict(list)
                cond_prob['Var'] = var.find('Var').text
                cond_prob['Parent'] = var.find('Parent').text.split()
                if not var.find('Parameter').get('type'):
                    cond_prob['Type'] = 'TBL'
                else:
                    cond_prob['Type'] = var.find('Parameter').get('type')
                cond_prob['Parameter'] = self.get_parameter(var)
                state_transition_function.append(cond_prob)
        return state_transition_function

    def get_obs_function(self):
        """
        Return the observation-function CondProb entries as a list of dicts
        (same structure as get_initial_beliefs()).
        """
        obs_function = []
        for variable in self.network.findall('ObsFunction'):
            for var in variable.findall('CondProb'):
                cond_prob = defaultdict(list)
                cond_prob['Var'] = var.find('Var').text
                cond_prob['Parent'] = var.find('Parent').text.split()
                if not var.find('Parameter').get('type'):
                    cond_prob['Type'] = 'TBL'
                else:
                    cond_prob['Type'] = var.find('Parameter').get('type')
                cond_prob['Parameter'] = self.get_parameter(var)
                obs_function.append(cond_prob)
        return obs_function

    def get_reward_function(self):
        """
        Return the reward-function Func entries as a list of dicts; table
        entries use 'ValueTable' instead of 'ProbTable'.
        """
        reward_function = []
        for variable in self.network.findall('RewardFunction'):
            for var in variable.findall('Func'):
                func = defaultdict(list)
                func['Var'] = var.find('Var').text
                func['Parent'] = var.find('Parent').text.split()
                if not var.find('Parameter').get('type'):
                    func['Type'] = 'TBL'
                else:
                    func['Type'] = var.find('Parameter').get('type')
                func['Parameter'] = self.get_parameter(var)
                reward_function.append(func)
        return reward_function

    def get_parameter(self, var):
        """
        This method supports the functional tags by providing the actual
        values in the function as list of dict in case of table type
        parameter or as nested dict in case of decision diagram.
        """
        parameter = []
        for parameter_tag in var.findall('Parameter'):
            parameter_type = 'TBL'
            if parameter_tag.get('type') is not None:
                parameter_type = parameter_tag.get('type')
            if parameter_type == 'TBL':
                parameter = self.get_parameter_tbl(parameter_tag)
            elif parameter_type == 'DD':
                parameter = defaultdict(list)
                parameter = self.get_parameter_dd(parameter_tag)
        return parameter

    def get_parameter_tbl(self, parameter):
        """
        This method returns parameters as list of dict in case of table type
        parameter.
        """
        par = []
        for entry in parameter.findall('Entry'):
            instance = defaultdict(list)
            instance['Instance'] = entry.find('Instance').text.split()
            # An Entry carries either a ProbTable (CondProb) or a
            # ValueTable (reward Func), never both.
            if entry.find('ProbTable') is None:
                instance['ValueTable'] = entry.find('ValueTable').text.split()
            else:
                instance['ProbTable'] = entry.find('ProbTable').text.split()
            par.append(instance)
        return par

    def get_parameter_dd(self, parameter):
        """
        This method returns parameters as nested dicts in case of decision
        diagram parameter.
        """
        dag = defaultdict(list)
        dag_elem = parameter.find('DAG')
        node = dag_elem.find('Node')
        root = node.get('var')

        def get_param(node):
            # Recursively convert a <Node> into {edge_value: subtree}.
            edges = defaultdict(list)
            for edge in node.findall('Edge'):
                if edge.find('Terminal') is not None:
                    edges[edge.get('val')] = edge.find('Terminal').text
                elif edge.find('Node') is not None:
                    node_cpd = defaultdict(list)
                    node_cpd[edge.find('Node').get('var')] = \
                        get_param(edge.find('Node'))
                    edges[edge.get('val')] = node_cpd
                elif edge.find('SubDAG') is not None:
                    subdag_attribute = defaultdict(list)
                    subdag_attribute['type'] = edge.find('SubDAG').get('type')
                    if subdag_attribute['type'] == 'template':
                        subdag_attribute['idref'] = \
                            edge.find('SubDAG').get('idref')
                    if edge.find('SubDAG').get('var'):
                        subdag_attribute['var'] = \
                            edge.find('SubDAG').get('var')
                    if edge.find('SubDAG').get('val'):
                        subdag_attribute['val'] = \
                            edge.find('SubDAG').get('val')
                    edges[edge.get('val')] = subdag_attribute
            return edges

        # BUG FIX: was `if parameter.find('SubDAGTemplate'):` — relying on
        # Element truthiness is deprecated and evaluates False for an
        # element with no children even when it is present; compare to None.
        if parameter.find('SubDAGTemplate') is not None:
            SubDAGTemplate = parameter.find('SubDAGTemplate')
            subdag_root = SubDAGTemplate.find('Node')
            subdag_node = subdag_root.get('var')
            subdag_dict = defaultdict(list)
            subdag_dict[subdag_node] = get_param(subdag_root)
            dag['SubDAGTemplate'] = subdag_dict
            dag['id'] = SubDAGTemplate.get('id')
        dag[root] = get_param(node)
        return dag
class PomdpXWriter():
    """
    Class for writing models in PomdpX
    """
    def __init__(self, model_data, encoding='utf-8', prettyprint=True):
        """
        Initialise a PomdpXWriter Object
        Parameters
        ---------------
        model: A Bayesian of Markov Model
            The model to write
        encoding: String(optional)
            Encoding for text data
        prettyprint: Bool(optional)
            Indentation in output XML if true
        """
        self.model = model_data
        self.encoding = encoding
        self.prettyprint = prettyprint
        # Pre-build the fixed PomdpX skeleton; the add_* methods fill in
        # the individual sections.
        self.xml = etree.Element("pomdpx", attrib={'version': '1.0'})
        self.description = etree.SubElement(self.xml, 'Description')
        self.discount = etree.SubElement(self.xml, 'Discount')
        self.variable = etree.SubElement(self.xml, 'Variable')
        self.initial_belief = etree.SubElement(self.xml, 'InitialStateBelief')
        self.transition_function = etree.SubElement(self.xml, 'StateTransitionFunction')
        self.observation_function = etree.SubElement(self.xml, 'ObsFunction')
        self.reward_function = etree.SubElement(self.xml, 'RewardFunction')
    def __str__(self, xml):
        """
        Return the XML as string.

        NOTE(review): unconventional — __str__ normally takes no arguments;
        callers here invoke it explicitly as self.__str__(tag).
        """
        if self.prettyprint:
            self.indent(xml)
        return etree.tostring(xml, encoding=self.encoding)
    def indent(self, elem, level=0):
        """
        Inplace prettyprint formatter.
        """
        i = "\n" + level*"  "
        if len(elem):
            if not elem.text or not elem.text.strip():
                elem.text = i + "  "
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
            for elem in elem:
                self.indent(elem, level+1)
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
        else:
            if level and (not elem.tail or not elem.tail.strip()):
                elem.tail = i
    def _add_value_enum(self, var, tag):
        """
        supports adding variables to the xml
        Parameters
        ---------------
        var: The SubElement variable
        tag: The SubElement tag to which enum value is to be added
        Return
        ---------------
        None
        """
        # Values named s0..sN are emitted compactly as a NumValues count;
        # anything else becomes an explicit space-separated ValueEnum.
        if var['ValueEnum'][0] == 's0':
            numvalues_tag = etree.SubElement(tag, 'NumValues')
            # NOTE(review): derives the count from the last character of the
            # last value, so this breaks past s9 (e.g. 's10') — confirm the
            # value range before relying on it.
            numvalues_tag.text = str(int(var['ValueEnum'][-1][-1]) + 1)
        else:
            valueenum_tag = etree.SubElement(tag, 'ValueEnum')
            valueenum_tag.text = ''
            for value in var['ValueEnum']:
                valueenum_tag.text += value + ' '
            # Drop the trailing space added by the loop above.
            valueenum_tag.text = valueenum_tag.text[:-1]
    def get_variables(self):
        """
        Add variables to PomdpX
        Return
        ---------------
        xml containing variables tag
        """
        state_variables = self.model['variables']['StateVar']
        for var in state_variables:
            state_var_tag = etree.SubElement(self.variable, 'StateVar', attrib={'vnamePrev': var['vnamePrev'],
                                                                                'vnameCurr': var['vnameCurr'],
                                                                                'fullyObs': 'true' if var['fullyObs']
                                                                                else 'false'})
            self._add_value_enum(var, state_var_tag)
        obs_variables = self.model['variables']['ObsVar']
        for var in obs_variables:
            obs_var_tag = etree.SubElement(self.variable, 'ObsVar', attrib={'vname': var['vname']})
            self._add_value_enum(var, obs_var_tag)
        action_variables = self.model['variables']['ActionVar']
        for var in action_variables:
            action_var_tag = etree.SubElement(self.variable, 'ActionVar', attrib={'vname': var['vname']})
            self._add_value_enum(var, action_var_tag)
        reward_var = self.model['variables']['RewardVar']
        for var in reward_var:
            etree.SubElement(self.variable, 'RewardVar', attrib={'vname': var['vname']})
        # [:-1] strips the trailing newline from the serialized tag.
        return self.__str__(self.variable)[:-1]
    def add_parameter_dd(self, dag_tag, node_dict):
        """
        helper function for adding parameters in condition
        Parameters
        ---------------
        dag_tag: etree SubElement
            the DAG tag is contained in this subelement
        node_dict: dictionary
            the decision diagram dictionary
        Return
        ---------------
        None
        """
        if isinstance(node_dict, defaultdict) or isinstance(node_dict, dict):
            # A node dict has exactly one key: the variable name; its value
            # maps edge labels to terminals, sub-nodes or SubDAG refs.
            node_tag = etree.SubElement(dag_tag, 'Node', attrib={'var': next(iter(node_dict.keys()))})
            edge_dict = next(iter(node_dict.values()))
            for edge in sorted(edge_dict.keys(), key=tuple):
                edge_tag = etree.SubElement(node_tag, 'Edge', attrib={'val': edge})
                value = edge_dict.get(edge)
                if isinstance(value, str):
                    # Leaf: a plain probability/value string.
                    terminal_tag = etree.SubElement(edge_tag, 'Terminal')
                    terminal_tag.text = value
                elif 'type' in value:
                    # SubDAG reference; attribute set depends on its kind.
                    if 'val' in value:
                        etree.SubElement(edge_tag, 'SubDAG',
                                         attrib={'type': value['type'], 'var': value['var'], 'val': value['val']})
                    elif 'idref' in value:
                        etree.SubElement(edge_tag, 'SubDAG', attrib={'type': value['type'], 'idref': value['idref']})
                    else:
                        etree.SubElement(edge_tag, 'SubDAG', attrib={'type': value['type'], 'var': value['var']})
                else:
                    # Nested decision-diagram node: recurse.
                    self.add_parameter_dd(edge_tag, value)
    def add_conditions(self, condition, condprob):
        """
        helper function for adding probability conditions for model\
        Parameters
        ---------------
        condition: dictionary
            contains and element of conditions list
        condprob: etree SubElement
            the tag to which condition is added
        Return
        ---------------
        None
        """
        var_tag = etree.SubElement(condprob, 'Var')
        var_tag.text = condition['Var']
        parent_tag = etree.SubElement(condprob, 'Parent')
        parent_tag.text = ''
        for parent in condition['Parent']:
            parent_tag.text += parent + ' '
        # Trim the trailing space left by the join loop above.
        parent_tag.text = parent_tag.text[:-1]
        parameter_tag = etree.SubElement(condprob, 'Parameter', attrib={'type': condition['Type']
                                         if condition['Type'] is not None
                                         else 'TBL'})
        if condition['Type'] == 'DD':
            dag_tag = etree.SubElement(parameter_tag, 'DAG')
            parameter_dict = condition['Parameter']
            # A SubDAGTemplate is emitted once as a sibling of the DAG and
            # removed from the dict so it is not serialized twice.
            if 'SubDAGTemplate' in parameter_dict:
                subdag_tag = etree.SubElement(parameter_tag, 'SubDAGTemplate', attrib={'id': parameter_dict['id']})
                self.add_parameter_dd(subdag_tag, parameter_dict['SubDAGTemplate'])
                del parameter_dict['SubDAGTemplate']
                del parameter_dict['id']
                self.add_parameter_dd(dag_tag, parameter_dict)
            else:
                self.add_parameter_dd(dag_tag, parameter_dict)
        else:
            # Table-type parameter: one Entry per instance row.
            for parameter in condition['Parameter']:
                entry = etree.SubElement(parameter_tag, 'Entry')
                instance = etree.SubElement(entry, 'Instance')
                instance.text = ''
                for instance_var in parameter['Instance']:
                    instance.text += instance_var + ' '
                # Quirky whitespace handling preserved verbatim: the
                # trailing space is trimmed only for multi-character final
                # tokens, and single-token instances gain a leading space.
                length_instance = len(parameter['Instance'])
                if len(parameter['Instance'][length_instance - 1]) > 1:
                    instance.text = instance.text[:-1]
                if len(parameter['Instance']) == 1:
                    instance.text = ' ' + instance.text
                # Reward Func entries carry values; CondProbs carry probs.
                if condprob.tag == 'Func':
                    table = 'ValueTable'
                else:
                    table = 'ProbTable'
                prob_table = parameter[table]
                prob_table_tag = etree.SubElement(entry, table)
                prob_table_tag.text = ''
                for probability in prob_table:
                    prob_table_tag.text += probability + ' '
                prob_table_tag.text = prob_table_tag.text[:-1]
    def add_initial_belief(self):
        """
        add initial belief tag to pomdpx model
        Return
        ---------------
        string containing the xml for initial belief tag
        """
        initial_belief = self.model['initial_state_belief']
        for condition in initial_belief:
            condprob = etree.SubElement(self.initial_belief, 'CondProb')
            self.add_conditions(condition, condprob)
        return self.__str__(self.initial_belief)[:-1]
    def add_state_transition_function(self):
        """
        add state transition function tag to pomdpx model
        Return
        ---------------
        string containing the xml for state transition tag
        """
        state_transition_function = self.model['state_transition_function']
        for condition in state_transition_function:
            condprob = etree.SubElement(self.transition_function, 'CondProb')
            self.add_conditions(condition, condprob)
        return self.__str__(self.transition_function)[:-1]
    def add_obs_function(self):
        """
        add observation function tag to pomdpx model
        Return
        ---------------
        string containing the xml for observation function tag
        """
        obs_function = self.model['obs_function']
        for condition in obs_function:
            condprob = etree.SubElement(self.observation_function, 'CondProb')
            self.add_conditions(condition, condprob)
        return self.__str__(self.observation_function)[:-1]
    def add_reward_function(self):
        """
        add reward function tag to pomdpx model
        Return
        ---------------
        string containing the xml for reward function tag
        """
        reward_function = self.model['reward_function']
        for condition in reward_function:
            # Reward entries use the Func tag (and ValueTable entries).
            condprob = etree.SubElement(self.reward_function, 'Func')
            self.add_conditions(condition, condprob)
        return self.__str__(self.reward_function)[:-1]
| {
"content_hash": "7dc212d4aeba4bd966fd8e53c576bc44",
"timestamp": "",
"source": "github",
"line_count": 631,
"max_line_length": 117,
"avg_line_length": 39.17115689381934,
"alnum_prop": 0.5082331998219849,
"repo_name": "kislayabhi/pgmpy",
"id": "ca26cf9e0599f40f32016378e858582798f5c904",
"size": "24743",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "pgmpy/readwrite/PomdpX.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6758"
},
{
"name": "Python",
"bytes": "499270"
},
{
"name": "Shell",
"bytes": "7721"
}
],
"symlink_target": ""
} |
"""Django's command-line utility for administrative tasks."""
import os
def main():
    """Print the SECRET_KEY environment variable (env-wiring smoke test)."""
    secret_key = os.getenv('SECRET_KEY')
    print("SECRET_KEY: {0}".format(secret_key))
# Script entry point when run directly (e.g. `python manage.py`).
if __name__ == '__main__':
    main()
| {
"content_hash": "baa42f963cbacd71b1a8af1792a914a8",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 61,
"avg_line_length": 18.7,
"alnum_prop": 0.6096256684491979,
"repo_name": "dokku/dokku",
"id": "0c5ebba3b0fb30cbf18d530d08f089c388c46817",
"size": "209",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/apps/dockerfile-release/manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Clojure",
"bytes": "666"
},
{
"name": "Dockerfile",
"bytes": "8971"
},
{
"name": "Go",
"bytes": "366060"
},
{
"name": "HTML",
"bytes": "5023"
},
{
"name": "Handlebars",
"bytes": "171"
},
{
"name": "Java",
"bytes": "853"
},
{
"name": "JavaScript",
"bytes": "7029"
},
{
"name": "Makefile",
"bytes": "33728"
},
{
"name": "PHP",
"bytes": "43"
},
{
"name": "Procfile",
"bytes": "8572"
},
{
"name": "Python",
"bytes": "30154"
},
{
"name": "Ruby",
"bytes": "221"
},
{
"name": "SCSS",
"bytes": "197"
},
{
"name": "Scala",
"bytes": "2235"
},
{
"name": "Shell",
"bytes": "792764"
}
],
"symlink_target": ""
} |
"""Build a white mask image from GroupsAndLabels.png.

Reads the source PNG and writes GroupsAndLabelsMask.png where every pixel is
white (255, 255, 255) and the alpha channel is the per-pixel value read from
the source row below.
"""
import png

r = png.Reader('GroupsAndLabels.png')
data = r.read()
width = data[0]
height = data[1]
pixels = list(data[2])

newPixels = list()
for row in pixels:
    newRow = list()
    for col in xrange(width):
        # NOTE(review): for an RGBA row, row[4 * col] is channel 0 (red) of
        # pixel `col`; the alpha channel would be row[4 * col + 3]. Confirm
        # the source image's channel layout before changing.
        alpha = row[4 * col]
        # White pixel, carrying over only the extracted channel as alpha.
        newRow.extend((255, 255, 255, alpha))
    newPixels.append(newRow)

# Use a context manager so the output file is closed even if write() raises
# (the original open()/close() pair leaked the handle on error).
with open('GroupsAndLabelsMask.png', 'wb') as outfile:
    w = png.Writer(width, height, alpha=True)
    w.write(outfile, newPixels)
| {
"content_hash": "327fc9823d710cd3b66558615c277e6b",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 47,
"avg_line_length": 22.695652173913043,
"alnum_prop": 0.6360153256704981,
"repo_name": "getdunne/SARAH",
"id": "b2d10bcc438ab6cdefad28816f35fad7f772d7a1",
"size": "522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Skinning/MakeGroupsAndLabelsMask.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "96833"
},
{
"name": "Python",
"bytes": "1047"
}
],
"symlink_target": ""
} |
import sys
import time
import argparse
import ConfigParser
from vnc_api.vnc_api import *
from cfgm_common.exceptions import *
class ConfigNodeProvisioner(object):
def __init__(self, args_str=None):
self._args = None
if not args_str:
args_str = ' '.join(sys.argv[1:])
self._parse_args(args_str)
connected = False
tries = 0
while not connected:
try:
self._vnc_lib = VncApi(
self._args.admin_user, self._args.admin_password,
self._args.admin_tenant_name,
self._args.api_server_ip,
self._args.api_server_port, '/',
auth_host=self._args.openstack_ip)
connected = True
except ResourceExhaustionError: # haproxy throws 503
if tries < 10:
tries += 1
time.sleep(3)
else:
raise
gsc_obj = self._vnc_lib.global_system_config_read(
fq_name=['default-global-system-config'])
self._global_system_config_obj = gsc_obj
if self._args.oper == 'add':
self.add_config_node()
elif self._args.oper == 'del':
self.del_config_node()
else:
print "Unknown operation %s. Only 'add' and 'del' supported"\
% (self._args.oper)
# end __init__
def _parse_args(self, args_str):
'''
Eg. python provision_config_node.py --host_name a3s30.contrail.juniper.net
--host_ip 10.1.1.1
--api_server_ip 127.0.0.1
--api_server_port 8082
--oper <add | del>
'''
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help=False)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(args_str.split())
defaults = {
'api_server_ip': '127.0.0.1',
'api_server_port': '8082',
'oper': 'add',
}
ksopts = {
'admin_user': 'user1',
'admin_password': 'password1',
'admin_tenant_name': 'default-domain'
}
if args.conf_file:
config = ConfigParser.SafeConfigParser()
config.read([args.conf_file])
defaults.update(dict(config.items("DEFAULTS")))
if 'KEYSTONE' in config.sections():
ksopts.update(dict(config.items("KEYSTONE")))
# Override with CLI options
# Don't surpress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
)
defaults.update(ksopts)
parser.set_defaults(**defaults)
parser.add_argument(
"--host_name", help="hostname name of config node", required=True)
parser.add_argument("--host_ip", help="IP address of config node", required=True)
parser.add_argument(
"--api_server_ip", help="IP address of api server", required=True)
parser.add_argument("--api_server_port", help="Port of api server")
parser.add_argument(
"--oper", default='add',
help="Provision operation to be done(add or del)")
parser.add_argument(
"--admin_user", help="Name of keystone admin user")
parser.add_argument(
"--admin_password", help="Password of keystone admin user")
parser.add_argument(
"--admin_tenant_name", help="Tenamt name for keystone admin user")
parser.add_argument(
"--openstack_ip", help="IP address of openstack node")
self._args = parser.parse_args(remaining_argv)
# end _parse_args
def add_config_node(self):
gsc_obj = self._global_system_config_obj
config_node_obj = ConfigNode(
self._args.host_name, gsc_obj,
config_node_ip_address=self._args.host_ip)
config_node_exists = True
try:
config_node_obj = self._vnc_lib.config_node_read(
fq_name=config_node_obj.get_fq_name())
except NoIdError:
config_node_exists = False
if config_node_exists:
self._vnc_lib.config_node_update(config_node_obj)
else:
self._vnc_lib.config_node_create(config_node_obj)
# end add_config_node
def del_config_node(self):
gsc_obj = self._global_system_config_obj
config_node_obj = ConfigNode(self._args.host_name, gsc_obj)
self._vnc_lib.config_node_delete(
fq_name=config_node_obj.get_fq_name())
# end del_config_node
# end class ConfigNodeProvisioner
def main(args_str=None):
    """Run the provisioner; arguments default to sys.argv when omitted."""
    ConfigNodeProvisioner(args_str)
# end main
# Script entry point when invoked directly.
if __name__ == "__main__":
    main()
| {
"content_hash": "210877d5919ab611dba897f3fa3f7b38",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 89,
"avg_line_length": 34.6258064516129,
"alnum_prop": 0.543692938326812,
"repo_name": "hthompson6/contrail-controller",
"id": "5c2c4539acb7321bf4c5a2f51da5c013b7ded59d",
"size": "5455",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "src/config/utils/provision_config_node.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "49233"
},
{
"name": "C++",
"bytes": "16663239"
},
{
"name": "CSS",
"bytes": "531"
},
{
"name": "Groff",
"bytes": "36552"
},
{
"name": "Java",
"bytes": "171966"
},
{
"name": "LLVM",
"bytes": "2937"
},
{
"name": "Lua",
"bytes": "5736"
},
{
"name": "Makefile",
"bytes": "12449"
},
{
"name": "Protocol Buffer",
"bytes": "4744"
},
{
"name": "Python",
"bytes": "3905542"
},
{
"name": "Shell",
"bytes": "72572"
},
{
"name": "Thrift",
"bytes": "40763"
},
{
"name": "Yacc",
"bytes": "7737"
}
],
"symlink_target": ""
} |
import numpy as np
from nose.tools import eq_
from fancyimpute.knn import KNN
from low_rank_data import XY, XY_incomplete, missing_mask
def test_knn():
    # Baseline error: mean absolute deviation of simply zero-filling the
    # missing entries.
    baseline_sad = np.sum(np.abs(XY[missing_mask]))
    baseline_mad = baseline_sad / missing_mask.sum()
    print("MAD zero-fill = ", baseline_mad)
    for k in [5, 15, 30]:
        print("-- k=", k)
        completed = KNN(k).fit_transform(XY_incomplete)
        finite_mask = np.isfinite(completed)
        # Imputation must leave no NaN/inf entries behind.
        eq_((~finite_mask).sum(), 0)
        residual = (completed - XY)[missing_mask]
        sad = np.sum(np.abs(residual))
        print("Sum absolute differences", sad)
        mad = sad / missing_mask.sum()
        print("Mean absolute difference", mad)
        # knnImpute should be at least twice as good as just zero fill
        assert mad <= (baseline_mad / 2.0), \
            "Expected knnImpute to be 2x better than zeroFill (%f) but got MAD=%f" % (
                baseline_mad,
                mad)
| {
"content_hash": "74aec7e3650736b62cf9b8997489260e",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 86,
"avg_line_length": 36.964285714285715,
"alnum_prop": 0.6019323671497585,
"repo_name": "iskandr/fancyimpute",
"id": "b5f564f1b2e928d319a1bfe3aed5a17c2d3608b3",
"size": "1035",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_knn.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "95506"
}
],
"symlink_target": ""
} |
import new, sys
import galaxy.util
import parameters
from parameters import basic
from parameters import grouping
from elementtree.ElementTree import XML
class ToolTestBuilder( object ):
    """
    Encapsulates information about a tool test, and allows creation of a
    dynamic TestCase class (the unittest framework is very class oriented,
    doing dynamic tests in this was allows better integration)
    """
    def __init__( self, tool, name, maxseconds ):
        # Tool under test plus the test's display name and timeout.
        self.tool = tool
        self.name = name
        self.maxseconds = maxseconds
        # (value, extra) pairs for data inputs whose files must be available.
        self.required_files = []
        # (name, value, extra) triples recorded via add_param().
        self.inputs = []
        # (name, file) pairs recorded via add_output().
        self.outputs = []
        self.error = False
        self.exception = None
    def add_param( self, name, value, extra ):
        # Record a test parameter. If it maps to a data input (possibly
        # nested inside a Conditional/Repeat group), also remember the file
        # it requires so the harness can stage it.
        try:
            if name not in self.tool.inputs:
                # Not a top-level input: search grouped inputs for it.
                for input_name, input_value in self.tool.inputs.items():
                    if isinstance( input_value, grouping.Conditional ) or isinstance( input_value, grouping.Repeat ):
                        self.__expand_grouping_for_data_input(name, value, extra, input_name, input_value)
            elif isinstance( self.tool.inputs[name], parameters.DataToolParameter ) and ( value, extra ) not in self.required_files:
                self.required_files.append( ( value, extra ) )
        # NOTE(review): bare except makes the lookup deliberately
        # best-effort, but it also hides real errors in tool definitions.
        except: pass
        self.inputs.append( ( name, value, extra ) )
    def add_output( self, name, file ):
        # Register an expected output file for comparison after the run.
        self.outputs.append( ( name, file ) )
    def __expand_grouping_for_data_input( self, name, value, extra, grouping_name, grouping_value ):
        # Currently handles grouping.Conditional and grouping.Repeat
        if isinstance( grouping_value, grouping.Conditional ):
            # The test parameter that selects the case is not itself a data
            # input, so skip it; otherwise search every case's inputs.
            if name != grouping_value.test_param.name:
                for case in grouping_value.cases:
                    for case_input_name, case_input_value in case.inputs.items():
                        if case_input_name == name and isinstance( case_input_value, basic.DataToolParameter ) and ( value, extra ) not in self.required_files:
                            self.required_files.append( ( value, extra ) )
                            return True
                        elif isinstance( case_input_value, grouping.Conditional ):
                            # Recurse into nested conditionals.
                            self.__expand_grouping_for_data_input(name, value, extra, case_input_name, case_input_value)
        elif isinstance( grouping_value, grouping.Repeat ):
            # FIXME: grouping.Repeat can only handle 1 repeat param element since the param name
            # is something like "input2" and the expanded page display is something like "queries_0|input2".
            # The problem is that the only param name on the page is "input2", and adding more test input params
            # with the same name ( "input2" ) is not yet supported in our test code ( the lat one added is the only
            # one used ).
            for input_name, input_value in grouping_value.inputs.items():
                if input_name == name and isinstance( input_value, basic.DataToolParameter ) and ( value, extra ) not in self.required_files:
                    self.required_files.append( ( value, extra ) )
                    return True
| {
"content_hash": "0e83f9b06b947ca9f9ffa7c81efb3ffe",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 159,
"avg_line_length": 58,
"alnum_prop": 0.6241379310344828,
"repo_name": "volpino/Yeps-EURAC",
"id": "52e142bd301f6f3c16fe89d9d7d4971cdff73a86",
"size": "3190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/galaxy/tools/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1028241"
},
{
"name": "Perl",
"bytes": "46988"
},
{
"name": "Python",
"bytes": "3728724"
},
{
"name": "Shell",
"bytes": "24718"
}
],
"symlink_target": ""
} |
import lxml.etree
import lxml.html
from ._patterns import FORWARD_LINE, FORWARD_STYLES, MULTIPLE_WHITESPACE_RE
# Tags treated as inline (no line break) by tree_line_generator(); any other
# tag is considered a block-level boundary that terminates the current line.
INLINE_TAGS = [
    "a",
    "b",
    "em",
    "i",
    "strong",
    "span",
    "font",
    "q",
    "object",
    "bdo",
    "sub",
    "sup",
    "center",
    "td",
    "th",
]

# Position markers used in (element, position) references throughout this
# module: BEGIN denotes the opening of a tag, END its closing.
BEGIN = "begin"
END = "end"
def trim_tree_after(element, include_element=True):
    """
    Remove the document tree following the given element. If include_element
    is True, the given element is kept in the tree, otherwise it is removed.
    """
    el = element
    # Walk up the ancestor chain; at each level, delete every sibling that
    # follows `el`, then continue one level up.
    for parent_el in element.iterancestors():
        # Trailing text after the cut point must not survive.
        el.tail = None
        if el != element or include_element:
            el = el.getnext()
        # Remove all following siblings at this level.
        while el is not None:
            remove_el = el
            el = el.getnext()
            parent_el.remove(remove_el)
        el = parent_el
def trim_tree_before(element, include_element=True, keep_head=True):
    """
    Remove the document tree preceding the given element. If include_element
    is True, the given element is kept in the tree, otherwise it is removed.
    If keep_head is True, <head> elements are spared while trimming.
    """
    el = element
    # Walk up the ancestor chain; at each level, delete every sibling that
    # precedes `el`, then continue one level up.
    for parent_el in element.iterancestors():
        # Leading text before the cut point must not survive.
        parent_el.text = None
        if el != element or include_element:
            el = el.getprevious()
        else:
            # The element itself is dropped, but its tail text belongs to
            # the part of the document we keep.
            parent_el.text = el.tail
        # Remove all preceding siblings at this level (optionally sparing
        # <head> so the document stays renderable).
        while el is not None:
            remove_el = el
            el = el.getprevious()
            tag = remove_el.tag
            is_head = isinstance(tag, str) and tag.lower() == "head"
            if not keep_head or not is_head:
                parent_el.remove(remove_el)
        el = parent_el
def trim_slice(lines, slice_tuple):
    """
    Trim a slice tuple (begin, end) so it starts at the first non-empty line
    (obtained via indented_tree_line_generator / get_line_info) and ends at the
    last non-empty line within the slice. Returns the new slice.
    """

    def _is_blank(line):
        # A line counts as blank when it is empty or holds only a quote
        # marker.
        return not line or line.strip() == ">"

    if not slice_tuple:
        return None

    begin, end = slice_tuple
    begin = 0 if begin is None else begin
    end = len(lines) if end is None else end

    # Advance past leading blank lines, then back off trailing ones.
    while begin < end and _is_blank(lines[begin]):
        begin += 1
    while end > begin and _is_blank(lines[end - 1]):
        end -= 1

    return (begin, end)
def unindent_tree(element):
    """
    Remove the outermost indent. For example, the tree
    "<div>A<blockqote>B<div>C<blockquote>D</blockquote>E</div>F</blockquote>G</div>"
    is transformed to
    "<div>A<div>B<div>C<blockquote>D</blockquote>E</div>F</div>G</div>"
    """
    # Only the first (outermost, in document order) indentation element is
    # flattened into a plain <div>.
    for node in element.iter():
        if not is_indentation_element(node):
            continue
        node.attrib.clear()
        node.tag = "div"
        return
def slice_tree(tree, start_refs, end_refs, slice_tuple, html_copy=None):
    """
    Slice the HTML tree with the given start_refs and end_refs (obtained via
    get_line_info) at the given slice_tuple, a tuple (start, end) containing
    the start and end of the slice (or None, to start from the start / end at
    the end of the tree). If html_copy is specified, a new tree is constructed
    from the given HTML (which must be the equal to the original tree's HTML*).
    The resulting tree is returned.

    *) The reason we have to specify the HTML is that we can't reliably
       construct a copy of the tree using copy.copy() (see bug
       https://bugs.launchpad.net/lxml/+bug/1562550).
    """
    start_ref = None
    end_ref = None

    if slice_tuple:
        slice_start, slice_end = slice_tuple

        # A slice entirely outside the line range yields an empty tree.
        if (slice_start is not None and slice_start >= len(start_refs)) or (
            slice_end is not None and slice_end <= 0
        ):
            return get_html_tree("")

        # Bounds that cover the whole tree are normalized to None (no trim).
        if slice_start is not None and slice_start <= 0:
            slice_start = None
        if slice_end is not None and slice_end >= len(start_refs):
            slice_end = None
    else:
        slice_start, slice_end = None, None

    if slice_start is not None:
        start_ref = start_refs[slice_start]
    if slice_end is not None and slice_end < len(end_refs):
        end_ref = end_refs[slice_end - 1]

    if html_copy is not None:
        # Build an identical tree from the supplied HTML, then translate the
        # element references into the new tree via their element paths.
        et = lxml.etree.ElementTree(tree)
        new_tree = get_html_tree(html_copy)
        if start_ref:
            selector = et.getelementpath(start_ref[0])
            start_ref = (new_tree.find(selector), start_ref[1])
        if end_ref:
            selector = et.getelementpath(end_ref[0])
            end_ref = (new_tree.find(selector), end_ref[1])
    else:
        new_tree = tree

    if start_ref:
        include_start = start_ref[1] == BEGIN
    if end_ref:
        include_end = end_ref[1] == END

    # If start_ref is the same as end_ref, and we don't include the element,
    # we are removing the entire tree. We need to handle this separately,
    # otherwise trim_tree_after won't work because it can't find the already
    # removed reference.
    if (
        start_ref
        and end_ref
        and start_ref[0] == end_ref[0]
        and (not include_start or not include_end)
    ):
        return get_html_tree("")

    if start_ref:
        trim_tree_before(start_ref[0], include_element=include_start)
    if end_ref:
        trim_tree_after(end_ref[0], include_element=include_end)

    return new_tree
def get_html_tree(html):
    """
    Given the HTML string, returns a LXML tree object. The tree is wrapped in
    <div> elements if it doesn't have a top level tag or parsing would
    otherwise result in an error. The wrapping can be later removed with
    strip_wrapping().
    """
    parser = lxml.html.HTMLParser(encoding="utf-8")
    html = html.encode("utf8")
    try:
        tree = lxml.html.fromstring(html, parser=parser)
    except lxml.etree.Error:
        # E.g. empty document. Use dummy <div>
        tree = lxml.html.fromstring("<div></div>")

    # If the document doesn't start with a top level tag, wrap it with a <div>
    # that will be later stripped out for consistent behavior.
    if tree.tag not in lxml.html.defs.top_level_tags:
        html = b"<div>" + html + b"</div>"
        tree = lxml.html.fromstring(html, parser=parser)

    # HACK for Outlook emails, where tags like <o:p> are rendered as <p>. We
    # can generally ignore these tags so we replace them with <span>, which
    # doesn't cause a line break. Also, we can't look up the element path of
    # tags that contain colons. When rendering the tree, we will restore the
    # tag name.
    for el in tree.iter():
        if el.nsmap or (isinstance(el.tag, str) and ":" in el.tag):
            if el.nsmap:
                # Namespaced tag: rebuild the original "prefix:name" form.
                actual_tag_name = "{}:{}".format(list(el.nsmap.keys())[0], el.tag)
            else:
                actual_tag_name = el.tag
            el.tag = "span"
            # Stash the real tag name so render_html_tree() can restore it.
            el.attrib["__tag_name"] = actual_tag_name
    return tree
def strip_wrapping(html):
    """
    Remove the wrapping that might have resulted when using get_html_tree().
    """
    if html.startswith("<div>") and html.endswith("</div>"):
        html = html[len("<div>"):-len("</div>")]
    return html.strip()
def render_html_tree(tree):
    """
    Render the given HTML tree, and strip any wrapping that was applied in
    get_html_tree().

    You should avoid further processing of the given tree after calling this
    method because we modify namespaced tags here.
    """
    # Restore any tag names that were changed in get_html_tree().
    for node in tree.iter():
        saved_tag = node.attrib.pop("__tag_name", None)
        if saved_tag is not None:
            node.tag = saved_tag

    markup = lxml.html.tostring(tree, encoding="utf8").decode("utf8")
    return strip_wrapping(markup)
def is_indentation_element(element):
    """Return whether the element is a <blockquote> (quote-indentation) tag."""
    tag = element.tag
    # Non-string tags (comments, processing instructions) never indent.
    return isinstance(tag, str) and tag.lower() == "blockquote"
def tree_token_generator(el, indentation_level=0):
    """
    Yield tokens for the given HTML element as follows:

    - A tuple (LXML element, BEGIN, indentation_level)
    - Text right after the start of the tag, or None.
    - Recursively calls the token generator for all child objects
    - A tuple (LXML element, END, indentation_level)
    - Text right after the end of the tag, or None.
    """
    # Non-element nodes (comments, processing instructions) have non-string
    # tags and are skipped entirely, children included.
    if not isinstance(el.tag, str):
        return

    # Entering a <blockquote> bumps the reported indentation level for the
    # whole subtree; it is restored before the END token.
    is_indentation = is_indentation_element(el)

    if is_indentation:
        indentation_level += 1

    yield (el, BEGIN, indentation_level)
    yield el.text

    for child in el.iterchildren():
        yield from tree_token_generator(child, indentation_level)

    if is_indentation:
        indentation_level -= 1

    yield (el, END, indentation_level)
    yield el.tail
def tree_line_generator(el, max_lines=None):
    """
    Iterate through an LXML tree and yield a tuple per line.

    In this context, lines are blocks of text separated by <br> tags
    or by block elements. The tuples contain the following elements:

    - A tuple with the element reference (element, position) for the start
      of the line. The tuple consists of:
      - The LXML HTML element which references the line
      - Whether the text starts at the beginning of the referenced element,
        or after the closing tag
    - A similar tuple indicating the ending of the line.
    - The email indentation level, if detected.
    - The plain (non-HTML) text of the line

    If max_lines is specified, the generator stops after yielding the given
    amount of lines.

    For example, the HTML tree "<div>foo <span>bar</span><br>baz</div>" yields:
    - ((<Element div>, 'begin'), (<Element br>, 'begin'), 0, 'foo bar')
    - ((<Element br>, 'end'), (<Element div>, 'end'), 0, 'baz').

    To illustrate the indentation level, the HTML tree
    '<div><blockquote>hi</blockquote>world</div>' yields:
    - ((<Element blockquote>, 'begin'), (<Element blockquote>, 'end'), 1, 'hi')
    - ((<Element blockquote>, 'end'), (<Element div>, 'end'), 0, 'world')
    """

    def _trim_spaces(text):
        # Collapse whitespace runs and strip the ends.
        return MULTIPLE_WHITESPACE_RE.sub(" ", text).strip()

    counter = 1
    if max_lines is not None and counter > max_lines:
        return

    # Buffer for the current line.
    line = ""

    # The reference tuple (element, position) for the start of the line.
    start_ref = None

    # The indentation level at the start of the line.
    start_indentation_level = None

    for token in tree_token_generator(el):
        if token is None:
            continue
        elif isinstance(token, tuple):
            el, state, indentation_level = token
            tag_name = el.tag.lower()
            line_break = tag_name == "br" and state == BEGIN
            is_block = tag_name not in INLINE_TAGS
            is_forward = (
                is_block and state == BEGIN and el.attrib.get("style") in FORWARD_STYLES
            )

            if is_block or line_break:
                line = _trim_spaces(line)
                if line or line_break or is_forward:
                    end_ref = (el, state)
                    yield start_ref, end_ref, start_indentation_level, line
                    counter += 1
                    if max_lines is not None and counter > max_lines:
                        return
                    line = ""

                    if is_forward:
                        # Simulate forward
                        yield (end_ref, end_ref, start_indentation_level, FORWARD_LINE)
                        counter += 1
                        if max_lines is not None and counter > max_lines:
                            return

                if not line:
                    start_ref = (el, state)
                    start_indentation_level = indentation_level
        elif isinstance(token, str):
            line += token
        else:
            raise RuntimeError("invalid token: {}".format(token))

    # BUG FIX: the trailing text used to be yielded as a bare string, which
    # broke the 4-tuple contract every other yield honors and that consumers
    # such as indented_tree_line_generator() unpack. Yield a full tuple;
    # (el, state) still hold the last tag token seen, which exists whenever
    # `line` is non-empty because text tokens always follow a tag token.
    line = _trim_spaces(line)
    if line:
        yield start_ref, (el, state), start_indentation_level, line
def indented_tree_line_generator(el, max_lines=None):
    r"""
    Like tree_line_generator, but yields tuples (start_ref, end_ref, line),
    where the line already takes the indentation into account by having "> "
    prepended. If a line already starts with ">", it is escaped ("\\>"). This
    makes it possible to reliably use methods that analyze plain text to detect
    quoting.
    """
    for start_ref, end_ref, depth, text in tree_line_generator(el, max_lines):
        # Escape literal leading ">" so it can't be mistaken for quoting.
        if text.startswith(">"):
            text = "\\" + text
        yield start_ref, end_ref, "> " * depth + text
def get_line_info(tree, max_lines=None):
    """
    Shortcut for indented_tree_line_generator() that returns an array of
    start references, an array of corresponding end references (see
    tree_line_generator() docs), and an array of corresponding lines.
    """
    # Transpose the (start_ref, end_ref, line) tuples into three sequences.
    transposed = list(zip(*indented_tree_line_generator(tree, max_lines=max_lines)))
    return transposed if transposed else ([], [], [])
| {
"content_hash": "ecf6a6238ca220b8bf78b67d93692558",
"timestamp": "",
"source": "github",
"line_count": 419,
"max_line_length": 88,
"avg_line_length": 31.439140811455847,
"alnum_prop": 0.601609352463372,
"repo_name": "closeio/quotequail",
"id": "1bc19d73271fb5454017624f9fd54435b01a340e",
"size": "13187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quotequail/_html.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "9480"
},
{
"name": "Python",
"bytes": "84948"
}
],
"symlink_target": ""
} |
"""Common utility functions."""
import base64
import collections
import datetime
import hashlib
import imghdr
import json
import os
import random
import re
import string
import time
import unicodedata
import urllib
import urlparse
from constants import constants # pylint: disable=relative-import
import feconf # pylint: disable=relative-import
import yaml
class InvalidInputException(Exception):
    """Error class for invalid input (e.g. an unparseable YAML string)."""
    pass
class ValidationError(Exception):
    """Error class for when a domain object fails validation checks."""
    pass
class ExplorationConversionError(Exception):
    """Error class for when an exploration fails to convert from a certain
    version to a certain version.
    """
    pass
def create_enum(*sequential, **names):
    """Creates an enumerated constant type.

    Args:
        sequential: *. Names that become members whose value equals the name.
        names: *. Explicit name=value members for the enumeration.

    Returns:
        type. A class whose attributes are the enumerated constants.
    """
    members = {member: member for member in sequential}
    members.update(names)
    return type('Enum', (), members)
def get_file_contents(filepath, raw_bytes=False, mode='r'):
    """Gets the contents of a file, given a relative filepath from oppia/.

    Args:
        filepath: str. A full path to the file.
        raw_bytes: bool. If True, return the stream exactly as read.
        mode: str. File opening mode, default is in read mode.

    Returns:
        *. The raw stream if raw_bytes is set, otherwise the contents
        decoded as utf-8.
    """
    with open(filepath, mode) as f:
        contents = f.read()
    return contents if raw_bytes else contents.decode('utf-8')
def get_exploration_components_from_dir(dir_path):
    """Gets the (yaml, assets) from the contents of an exploration data dir.

    Args:
        dir_path: str. A full path to the exploration root directory.

    Returns:
        *. A 2-tuple, the first element of which is a yaml string, and the
        second element of which is a list of (filepath, content) 2-tuples.
        The filepath does not include the assets/ prefix.

    Raises:
        Exception: If the following condition doesn't hold: "There is exactly
        one file not in assets/, and this file has a .yaml suffix".
    """
    yaml_content = None
    assets_list = []

    # Normalize away trailing slashes so the path-depth arithmetic below is
    # stable regardless of how dir_path was written.
    dir_path_array = dir_path.split('/')
    while dir_path_array[-1] == '':
        dir_path_array = dir_path_array[:-1]
    dir_path_length = len(dir_path_array)

    for root, dirs, files in os.walk(dir_path):
        for directory in dirs:
            if root == dir_path and directory != 'assets':
                raise Exception(
                    'The only directory in %s should be assets/' % dir_path)

        for filename in files:
            filepath = os.path.join(root, filename)
            if root == dir_path:
                # These files are added automatically by Mac OS Xsystems.
                # We ignore them.
                if not filepath.endswith('.DS_Store'):
                    if yaml_content is not None:
                        raise Exception(
                            'More than one non-asset file specified '
                            'for %s' % dir_path)
                    elif not filepath.endswith('.yaml'):
                        raise Exception(
                            'Found invalid non-asset file %s. There '
                            'should only be a single non-asset file, '
                            'and it should have a .yaml suffix.' % filepath)
                    else:
                        yaml_content = get_file_contents(filepath)
            else:
                filepath_array = filepath.split('/')
                # The additional offset is to remove the 'assets/' prefix.
                filename = '/'.join(filepath_array[dir_path_length + 1:])
                assets_list.append((filename, get_file_contents(
                    filepath, raw_bytes=True)))

    if yaml_content is None:
        # NOTE(review): 'specifed' typo in this user-facing message; fix it
        # in a dedicated change since callers/tests may match the exact text.
        raise Exception('No yaml file specifed for %s' % dir_path)

    return yaml_content, assets_list
def get_comma_sep_string_from_list(items):
    """Turns a list of items into a comma-separated string.

    Args:
        items: list. List of the items.

    Returns:
        str. The items separated by commas, with 'and' before the last one.
    """
    if not items:
        return ''
    if len(items) == 1:
        return items[0]

    all_but_last = ', '.join(items[:-1])
    return '%s and %s' % (all_but_last, items[-1])
def to_ascii(input_string):
    """Change unicode characters in a string to ascii if possible.

    Args:
        input_string: str. String to convert.

    Returns:
        str. The ascii representation of the input string; characters with
        no ascii equivalent are dropped.
    """
    # NFKD decomposition splits accented characters into base + combining
    # marks, which the ascii encoding can then discard.
    normalized = unicodedata.normalize('NFKD', unicode(input_string))
    return normalized.encode('ascii', 'ignore')
def yaml_from_dict(dictionary, width=80):
    """Gets the YAML representation of a dict.

    Args:
        dictionary: dict. Dictionary for conversion into yaml.
        width: int. Maximum line width for the yaml representation;
            defaults to 80.

    Returns:
        str. Converted yaml of the passed dictionary.
    """
    # default_flow_style=False forces block style (one key per line).
    return yaml.safe_dump(dictionary, default_flow_style=False, width=width)
def dict_from_yaml(yaml_str):
    """Gets the dict representation of a YAML string.

    Args:
        yaml_str: str. Yaml string for conversion into dict.

    Returns:
        dict. Parsed dict representation of the yaml string.

    Raises:
        InvalidInputException: If the yaml string cannot be parsed.
    """
    try:
        parsed = yaml.safe_load(yaml_str)
        # A document that parses but is not a mapping is also invalid here.
        assert isinstance(parsed, dict)
        return parsed
    except yaml.YAMLError as e:
        raise InvalidInputException(e)
def recursively_remove_key(obj, key_to_remove):
    """Recursively removes keys from a list or dict, in place.

    Args:
        obj: *. List or dict from which the key has to be removed; any
            other type is left untouched.
        key_to_remove: str. Key value that has to be removed.
    """
    if isinstance(obj, list):
        for element in obj:
            recursively_remove_key(element, key_to_remove)
    elif isinstance(obj, dict):
        # Remove the key at this level (no-op if absent), then recurse into
        # the remaining values.
        obj.pop(key_to_remove, None)
        for value in obj.values():
            recursively_remove_key(value, key_to_remove)
def get_random_int(upper_bound):
    """Returns a random integer in [0, upper_bound).

    Args:
        upper_bound: int. Exclusive upper limit for the random draw; must
            be non-negative.

    Returns:
        int. Randomly generated integer less than the upper_bound.
    """
    assert upper_bound >= 0 and isinstance(upper_bound, int)
    # SystemRandom draws from the OS entropy source.
    return random.SystemRandom().randrange(0, stop=upper_bound)
def get_random_choice(alist):
    """Gets a random element from a list.

    Args:
        alist: list(*). Non-empty list to pick from.

    Returns:
        *. Random element chosen from the passed input list.
    """
    assert isinstance(alist, list) and len(alist) > 0
    return alist[get_random_int(len(alist))]
def convert_png_binary_to_data_url(content):
    """Converts a png image string (represented by 'content') to a data URL.

    Args:
        content: str. PNG binary file content.

    Returns:
        str. Data url created from the binary content of the PNG.

    Raises:
        Exception: If the given binary string is not of a PNG image.
    """
    # imghdr sniffs the header bytes, so only genuine PNG payloads pass.
    if imghdr.what(None, h=content) == 'png':
        # NOTE(review): str.encode('base64') and urllib.quote are Python 2
        # only; a Python 3 port needs base64.b64encode / urllib.parse.quote.
        return 'data:image/png;base64,%s' % urllib.quote(
            content.encode('base64'))
    else:
        raise Exception('The given string does not represent a PNG image.')
def convert_png_to_data_url(filepath):
    """Converts the png file at filepath to a data URL.

    Args:
        filepath: str. A full path to the PNG file.

    Returns:
        str. Data url created from the file's binary content.

    Raises:
        Exception: If the file's content is not a PNG image.
    """
    # Read raw bytes; decoding is neither needed nor valid for binary data.
    file_contents = get_file_contents(filepath, raw_bytes=True, mode='rb')
    return convert_png_binary_to_data_url(file_contents)
def camelcase_to_hyphenated(camelcase_str):
    """Camelcase to hyphenated conversion of the passed string.

    Args:
        camelcase_str: str. Camelcase string representation.

    Returns:
        str. Hyphenated string representation of the camelcase string.
    """
    # Two passes: first break before an uppercase run followed by lowercase
    # ('HTTPResponse' -> 'HTTP-Response'), then break at lower/digit-to-upper
    # boundaries ('abcDEF' -> 'abc-DEF'), and finally lowercase everything.
    with_breaks = re.sub('(.)([A-Z][a-z]+)', r'\1-\2', camelcase_str)
    return re.sub('([a-z0-9])([A-Z])', r'\1-\2', with_breaks).lower()
def camelcase_to_snakecase(camelcase_str):
    """Camelcase to snake case conversion of the passed string.

    Args:
        camelcase_str: str. Camelcase string representation.

    Returns:
        str. Snakecase representation of the passed camelcase string.
    """
    # Same two-pass splitting as camelcase_to_hyphenated, with underscores.
    with_breaks = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camelcase_str)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', with_breaks).lower()
def set_url_query_parameter(url, param_name, param_value):
    """Set or replace a query parameter, and return the modified URL.

    Args:
        url: str. URL string which contains the query parameter.
        param_name: str. Name of the query parameter to set or replace.
        param_value: str. New value for the parameter.

    Returns:
        str. Formatted URL with the query parameter set or replaced.

    Raises:
        Exception: If the query parameter name is not a string.
    """
    if not isinstance(param_name, basestring):
        raise Exception(
            'URL query parameter name must be a string, received %s'
            % param_name)

    scheme, netloc, path, query_string, fragment = urlparse.urlsplit(url)
    query_params = urlparse.parse_qs(query_string)

    # Overwrite any existing values for this parameter with a single value.
    query_params[param_name] = [param_value]
    new_query_string = urllib.urlencode(query_params, doseq=True)

    return urlparse.urlunsplit(
        (scheme, netloc, path, new_query_string, fragment))
class JSONEncoderForHTML(json.JSONEncoder):
    """Encodes JSON that is safe to embed in HTML, by escaping the
    characters '&', '<' and '>' as unicode escape sequences."""

    def encode(self, o):
        pieces = self.iterencode(o, True)
        joiner = '' if self.ensure_ascii else u''
        return joiner.join(pieces)

    def iterencode(self, o, _one_shot=False):
        base_chunks = super(
            JSONEncoderForHTML, self).iterencode(o, _one_shot=_one_shot)
        for chunk in base_chunks:
            # Escape HTML-significant characters so the output cannot close
            # a <script> tag or start an element when inlined in a page.
            chunk = chunk.replace('&', '\\u0026')
            chunk = chunk.replace('<', '\\u003c')
            chunk = chunk.replace('>', '\\u003e')
            yield chunk
def convert_to_hash(input_string, max_length):
    """Convert a string to a SHA1-based hash string.

    Args:
        input_string: str. Input string for conversion to hash.
        max_length: int. Maximum length of the generated hash.

    Returns:
        str. Hash of the input string, truncated to at most max_length
        characters, using only the character set [A-Za-z0-9].

    Raises:
        Exception: If the input is not a string.
    """
    if not isinstance(input_string, basestring):
        raise Exception(
            'Expected string, received %s of type %s' %
            (input_string, type(input_string)))

    # Standard base64 emits '+', '/' and '=' padding; substituting them
    # ('+' -> 'a', '/' -> 'b', '=' -> 'c') keeps the result alphanumeric.
    sha1_digest = hashlib.sha1(input_string.encode('utf-8')).digest()
    encoded_string = base64.b64encode(sha1_digest, altchars='ab')
    return encoded_string.replace('=', 'c')[:max_length]
def base64_from_int(value):
    """Convert a number into a base64 representation.

    Args:
        value: int. Integer value for conversion into base64.

    Returns:
        *. The base64 encoding of the single byte holding the value.
    """
    single_byte = bytes([value])
    return base64.b64encode(single_byte)
def get_time_in_millisecs(datetime_obj):
    """Return the time in milliseconds since the Epoch.

    Args:
        datetime_obj: datetime. An object of type datetime.datetime.

    Returns:
        float. Milliseconds since the Epoch. Note that time.mktime
        interprets the naive datetime in the machine's local timezone.
    """
    millisecs = time.mktime(datetime_obj.timetuple()) * 1000
    return millisecs + datetime_obj.microsecond / 1000.0
def get_current_time_in_millisecs():
    """Return the current time in milliseconds since the Epoch.

    NOTE(review): utcnow() yields a naive datetime that
    get_time_in_millisecs feeds through time.mktime (local time), so the
    absolute value is offset on non-UTC machines -- confirm intent.
    """
    utc_now = datetime.datetime.utcnow()
    return get_time_in_millisecs(utc_now)
def get_human_readable_time_string(time_msec):
    """Given a time in milliseconds since the epoch, return a
    human-readable UTC time string for the admin dashboard.

    Args:
        time_msec: float. Milliseconds since the Epoch.

    Returns:
        str. Time formatted like 'January 01 00:00:00'.
    """
    time_sec = time_msec / 1000.0
    return time.strftime('%B %d %H:%M:%S', time.gmtime(time_sec))
def are_datetimes_close(later_datetime, earlier_datetime):
    """Return True if the two datetimes are separated by less than
    feconf.PROXIMAL_TIMEDELTA_SECS seconds.
    """
    delta = later_datetime - earlier_datetime
    return delta.total_seconds() < feconf.PROXIMAL_TIMEDELTA_SECS
def generate_random_string(length):
    """Generate a URL-safe random string of the specified length.

    Args:
        length: int. Length of the string to be generated.

    Returns:
        str. Random URL-safe base64 string of exactly `length` characters.
    """
    # urlsafe_b64encode expands the input, so slicing back down to
    # `length` always succeeds.
    random_bytes = os.urandom(length)
    return base64.urlsafe_b64encode(random_bytes)[:length]
def generate_new_session_id():
    """Generate a new session id.

    Returns:
        str. Random string of length 24.
    """
    session_id_length = 24
    return generate_random_string(session_id_length)
def vfs_construct_path(base_path, *path_components):
    """Mimic the behaviour of os.path.join on Posix machines.

    Args:
        base_path: str. The starting path.
        *path_components: str. Components appended in order; an absolute
            component (starting with '/') discards everything before it.

    Returns:
        str. The joined path.
    """
    result = base_path
    for part in path_components:
        if part.startswith('/'):
            # Absolute component resets the accumulated path.
            result = part
        elif result == '' or result.endswith('/'):
            result = result + part
        else:
            result = '%s/%s' % (result, part)
    return result
def vfs_normpath(path):
    """Normalize path from posixpath.py, eliminating double slashes, etc.

    Args:
        path: str|unicode. The path to normalize.

    Returns:
        str|unicode. The normalized path ('.' for an empty input).
    """
    # Preserve unicode (if path is unicode).
    slash, dot = (u'/', u'.') if isinstance(path, unicode) else ('/', '.')
    if path == '':
        return dot
    initial_slashes = path.startswith('/')
    # POSIX allows one or two initial slashes, but treats three or more
    # as single slash.
    if (initial_slashes and
            path.startswith('//') and not path.startswith('///')):
        initial_slashes = 2
    comps = path.split('/')
    new_comps = []
    for comp in comps:
        # Empty components (from '//') and '.' are no-ops.
        if comp in ('', '.'):
            continue
        # Keep the component unless it is a '..' that can cancel a
        # previously kept, non-'..' component.
        if (comp != '..' or
                (not initial_slashes and not new_comps) or
                (new_comps and new_comps[-1] == '..')):
            new_comps.append(comp)
        elif new_comps:
            new_comps.pop()
    comps = new_comps
    path = slash.join(comps)
    if initial_slashes:
        path = slash * initial_slashes + path
    return path or dot
def require_valid_name(name, name_type, allow_empty=False):
    """Generic name validation.

    Args:
        name: str. The name to validate.
        name_type: str. A human-readable string, like 'the exploration title'
            or 'a state name'. This will be shown in error messages.
        allow_empty: bool. If True, empty strings are allowed.

    Raises:
        ValidationError: If the name is not a string, has an invalid length,
            has leading/trailing or adjacent whitespace, or contains a
            disallowed character.
    """
    if not isinstance(name, basestring):
        # Bug fix: this message previously interpolated `name` (the value,
        # which may not even be a string) instead of the human-readable
        # descriptor used by every other message in this function.
        raise ValidationError('%s must be a string.' % name_type)

    if allow_empty and name == '':
        return

    # This check is needed because state names are used in URLs and as ids
    # for statistics, so the name length should be bounded above.
    if len(name) > 50 or len(name) < 1:
        raise ValidationError(
            'The length of %s should be between 1 and 50 '
            'characters; received %s' % (name_type, name))

    if name[0] in string.whitespace or name[-1] in string.whitespace:
        raise ValidationError(
            'Names should not start or end with whitespace.')

    if re.search(r'\s\s+', name):
        raise ValidationError(
            'Adjacent whitespace in %s should be collapsed.' % name_type)

    for character in constants.INVALID_NAME_CHARS:
        if character in name:
            raise ValidationError(
                'Invalid character %s in %s: %s' %
                (character, name_type, name))
def capitalize_string(input_string):
    """Convert the first character of a string to its uppercase
    equivalent (if it's a letter), and return the result.

    Args:
        input_string: str. String to process (to capitalize).

    Returns:
        str. The capitalized string; empty/falsy inputs are returned
        unchanged.
    """
    # Guard against empty strings (and None).
    if not input_string:
        return input_string
    return input_string[0].upper() + input_string[1:]
def get_hex_color_for_category(category):
    """Return the color associated with the given category, or the
    default color for categories missing from the app constants.

    Args:
        category: str. Category to get color for.

    Returns:
        str. Color assigned to that category.
    """
    return constants.CATEGORIES_TO_COLORS.get(
        category, constants.DEFAULT_COLOR)
def get_thumbnail_icon_url_for_category(category):
    """Return the thumbnail icon path for the given category, or the
    default thumbnail for categories missing from the app constants.

    Args:
        category: str. Category to get the thumbnail icon for.

    Returns:
        str. Path to the thumbnail icon assigned to that category.
    """
    if category in constants.CATEGORIES_TO_COLORS:
        icon_name = category
    else:
        icon_name = constants.DEFAULT_THUMBNAIL_ICON
    # Spaces are stripped so the name maps onto an asset filename.
    return '/subjects/%s.svg' % icon_name.replace(' ', '')
def is_valid_language_code(language_code):
    """Check whether the given language code is valid.

    Args:
        language_code: str. The language code.

    Returns:
        bool. Whether the language code is valid or not.
    """
    return any(
        lc['code'] == language_code
        for lc in constants.ALL_LANGUAGE_CODES)
def unescape_encoded_uri_component(escaped_string):
    """Unescape a string that was encoded with encodeURIComponent."""
    unquoted = urllib.unquote(escaped_string)
    return unquoted.decode('utf-8')
def get_asset_dir_prefix():
    """Return the asset-directory prefix: '' in dev mode, '/build' in
    prod. Used as a prefix in urls for images, css and script files.
    """
    return '' if constants.DEV_MODE else '/build'
def convert_to_str(string_to_convert):
    """Encode a unicode string to a byte string; pass anything else
    through unchanged.

    Args:
        string_to_convert: unicode|str.

    Returns:
        str. The UTF-8 encoded string (or the input unchanged if it was
        already a str).
    """
    if not isinstance(string_to_convert, unicode):
        return string_to_convert
    return string_to_convert.encode('utf-8')
def get_hashable_value(value):
    """Return a hashable version of the given JSON-like value.

    Built-in sequences are converted into hashable counterparts
    (list -> tuple, dict -> sorted tuple of pairs), converting their
    elements recursively. All other value types are assumed to already
    be hashable.

    Args:
        value: *. Some JSON-like object, that is, an object made-up of
            only: lists, dicts, strings, ints, bools, None. Types can be
            nested in each other.

    Returns:
        *. A new object that will always have the same hash for
        "equivalent" values.
    """
    if isinstance(value, dict):
        # Dict keys are already hashable, only values need converting.
        return tuple(sorted(
            (key, get_hashable_value(val))
            for key, val in value.iteritems()))
    if isinstance(value, list):
        return tuple(get_hashable_value(item) for item in value)
    return value
class OrderedCounter(collections.Counter, collections.OrderedDict):
    """Counter that remembers the order elements are first encountered.

    The MRO routes Counter's storage through OrderedDict, so iteration
    follows first-insertion order.
    """
    pass
| {
"content_hash": "4304a8048ec754c383dcd5c4cc6b5cd9",
"timestamp": "",
"source": "github",
"line_count": 644,
"max_line_length": 80,
"avg_line_length": 31.091614906832298,
"alnum_prop": 0.6328222544074315,
"repo_name": "souravbadami/oppia",
"id": "ec3872ed0b5b964aa1233aeaf9a67302a4c1da59",
"size": "20628",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "90864"
},
{
"name": "HTML",
"bytes": "1044569"
},
{
"name": "JavaScript",
"bytes": "606331"
},
{
"name": "Python",
"bytes": "7870122"
},
{
"name": "Shell",
"bytes": "54930"
},
{
"name": "TypeScript",
"bytes": "4922933"
}
],
"symlink_target": ""
} |
import sys
import time
import uuid
from database import DataBase
from datetime import datetime
from twitter import Twitter
from config import Config
class WeiboMessage(object):
    """A message to post: a module's metadata plus its image list."""

    def __init__(self, site, moduleId, moduleName, moduleImageList):
        # A fresh UUID identifies this message in the database.
        self.message_id = str(uuid.uuid1())
        self.message_web_site = site
        self.message_module_id = moduleId
        self.message_comment = moduleName
        self.moduleImageList = moduleImageList
class WeiboImage(object):
    """A single image: database id, local file path and, once uploaded,
    the remote picture id (pid)."""

    def __init__(self, imageid, filepath):
        self.imageid = imageid
        self.filepath = filepath
        # Filled in after the image has been uploaded.
        self.pid = ''
class TwitterTask(object):
    """Polls the database for unposted modules and tweets their images,
    at most one message every ~30 minutes.

    NOTE(review): all SQL below is built by string concatenation -- a
    SQL-injection risk if any value can contain a quote; consider
    parameterized queries.
    """

    def __init__(self, config):
        self.config = config
        self.db = DataBase(self.config)
        # Timestamp of the most recently stored message, if any.
        self.lasttime = self.getLastTime()

    def getLastTime(self):
        """Return the timestamp of the newest stored message, or None."""
        sql = "SELECT message_time from wb_message order by message_time desc LIMIT 0,1"
        result = self.db.searchOne(sql)
        if result != None:
            return result['message_time']
        return None

    def getYeskyMessage(self):
        """Return the next unposted 'yesky' module as a WeiboMessage, or
        None if no module with images remains."""
        # Pick one module that has no wb_message row yet.
        sql = "SELECT module_id,module_name FROM yeskysite where NOT EXISTS (SELECT message_module_id from wb_message where message_module_id = yeskysite.module_id) LIMIT 0 ,1"
        result = self.db.searchOne(sql)
        if result != None:
            moduleId = result['module_id']
            moduleName = result['module_name']
            moduleImageList = []
            sql = "SELECT image_id,filepath FROM yeskyimage WHERE module_id = '"+moduleId+"'"
            result = self.db.search(sql)
            for row in result:
                moduleImageList.append(WeiboImage(row['image_id'],row['filepath']))
            # Only modules that actually have images are worth posting.
            if len(moduleImageList) >0:
                return WeiboMessage('yesky',moduleId,moduleName,moduleImageList)
        return None

    def getMessage(self):
        """Return the next message to post (currently 'yesky' only)."""
        return self.getYeskyMessage()

    def sendMessage(self, message):
        """Upload the message's images and tweet them in batches of 4,
        then record the message in the database."""
        tw = Twitter(self.config)
        picList = []
        for image in message.moduleImageList:
            if image.filepath != None and image.filepath != '':
                pid = tw.uploadImage(image.filepath)
                print(pid)
                image.pid = pid
                picList.append(pid)
        iCount = 0
        pids = ""
        iSend = 1;
        for i in range(len(picList)):
            pids = pids + picList[i] + ","
            iCount = iCount +1
            # At most 4 images are attached per tweet.
            if iCount >= 4:
                tw.sendImage(message.message_comment + str(iSend),pids.strip())
                iSend = iSend + 1
                time.sleep(1)
                pids = ""
                iCount = 0
        # Send any remaining images (fewer than 4) in a final tweet.
        if iCount > 0:
            tw.sendImage(message.message_comment + str(iSend) + tw.getRandomTips(),pids.strip())
        self.saveToDatabase(message)

    def saveToDatabase(self, message):
        """Record the posted message and each of its images."""
        sql = "Insert into wb_message values('"+message.message_id+"',now(),'"+message.message_comment+"','"+self.config.Login+"','"+message.message_web_site+"','"+message.message_module_id+"')"
        self.db.insertData(sql)
        for image in message.moduleImageList:
            sql = "Insert into wb_message_image values('"+str(uuid.uuid1())+"','"+image.imageid+"','"+image.pid+"')"
            self.db.insertData(sql)

    def start(self):
        """Main loop: post a new message at most every 30 minutes.

        NOTE(review): print(message.message_comment) runs before the
        None check, so an empty queue raises AttributeError (swallowed
        by the broad except). Also `(currTime - self.lasttime).seconds`
        ignores whole days; .total_seconds() is probably intended.
        """
        while True:
            try:
                currTime = datetime.now()
                if self.lasttime == None or (currTime - self.lasttime).seconds / 60 >= 30:
                    message = self.getMessage()
                    print(message.message_comment)
                    if message != None :
                        self.sendMessage(message)
                        self.lasttime = currTime
            except Exception as e:
                print(e)
            time.sleep(120)
# Script entry point: build the task from config and loop forever.
if __name__ == '__main__':
    tt = TwitterTask(Config())
    tt.start()
| {
"content_hash": "7773b712bbdca036603152634bc65e92",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 194,
"avg_line_length": 36.299065420560744,
"alnum_prop": 0.5671987641606591,
"repo_name": "XJouYi/TwitterRobot",
"id": "f3493d710f661d24ba24ec67c83c5221d46b9e1c",
"size": "3927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twittertask.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11181"
}
],
"symlink_target": ""
} |
from django.contrib.sites.managers import CurrentSiteManager
## legacy...
class TempJobsManager(CurrentSiteManager):
    """Site-aware manager restricted to jobs in the TEMPORARY status."""

    def get_query_set(self):
        base_qs = super(TempJobsManager, self).get_query_set()
        return base_qs.filter(status=self.model.TEMPORARY)
class ActiveJobsManager(CurrentSiteManager):
    """Site-aware manager restricted to jobs in the ACTIVE status."""

    def get_query_set(self):
        base_qs = super(ActiveJobsManager, self).get_query_set()
        return base_qs.filter(status=self.model.ACTIVE)
| {
"content_hash": "d02ca60343b7d486e87fc378504d9a5e",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 63,
"avg_line_length": 33.5,
"alnum_prop": 0.6716417910447762,
"repo_name": "wtrevino/django-listings",
"id": "ac7409a486fe02e662e55ec82e07301688219818",
"size": "493",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "listings/managers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "5455"
},
{
"name": "Python",
"bytes": "113175"
}
],
"symlink_target": ""
} |
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class systemcollectionparam(base_resource) :
    """ Configuration for collection parameter resource.

    NOTE(review): this class follows the auto-generated Nitro SDK
    binding pattern; the try/except-reraise wrappers are codegen
    boilerplate and are intentionally left as-is.
    """
    def __init__(self) :
        # Backing fields for the properties defined below.
        self._communityname = ""
        self._loglevel = ""
        self._datapath = ""

    @property
    def communityname(self) :
        """SNMPv1 community name for authentication.
        """
        try :
            return self._communityname
        except Exception as e:
            raise e

    @communityname.setter
    def communityname(self, communityname) :
        """SNMPv1 community name for authentication.
        """
        try :
            self._communityname = communityname
        except Exception as e:
            raise e

    @property
    def loglevel(self) :
        """specify the log level. Possible values CRITICAL,WARNING,INFO,DEBUG1,DEBUG2.<br/>Minimum length = 1.
        """
        try :
            return self._loglevel
        except Exception as e:
            raise e

    @loglevel.setter
    def loglevel(self, loglevel) :
        """specify the log level. Possible values CRITICAL,WARNING,INFO,DEBUG1,DEBUG2.<br/>Minimum length = 1
        """
        try :
            self._loglevel = loglevel
        except Exception as e:
            raise e

    @property
    def datapath(self) :
        """specify the data path to the database.<br/>Minimum length = 1.
        """
        try :
            return self._datapath
        except Exception as e:
            raise e

    @datapath.setter
    def datapath(self, datapath) :
        """specify the data path to the database.<br/>Minimum length = 1
        """
        try :
            self._datapath = datapath
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response) :
        """ converts nitro response into object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(systemcollectionparam_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                # Error code 444 indicates an invalidated session.
                if (result.errorcode == 444) :
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.systemcollectionparam
        except Exception as e :
            raise e

    def _get_object_name(self) :
        """ Returns the value of object identifier argument
        """
        try :
            # This resource type has no identifying attribute.
            return None
        except Exception as e :
            raise e

    @classmethod
    def update(cls, client, resource) :
        """ Use this API to update systemcollectionparam.

        NOTE(review): when `resource` is a list this method falls
        through and implicitly returns None -- confirm list input is
        unsupported for this resource.
        """
        try :
            if type(resource) is not list :
                updateresource = systemcollectionparam()
                updateresource.communityname = resource.communityname
                updateresource.loglevel = resource.loglevel
                updateresource.datapath = resource.datapath
                return updateresource.update_resource(client)
        except Exception as e :
            raise e

    @classmethod
    def unset(cls, client, resource, args) :
        """ Use this API to unset the properties of systemcollectionparam resource.
        Properties that need to be unset are specified in args array.
        """
        try :
            if type(resource) is not list :
                unsetresource = systemcollectionparam()
                return unsetresource.unset_resource(client, args)
        except Exception as e :
            raise e

    @classmethod
    def get(cls, client, name="", option_="") :
        """ Use this API to fetch all the systemcollectionparam resources that are configured on netscaler.

        NOTE(review): a non-empty `name` makes this method return None --
        confirm fetch-by-name is intentionally unsupported here.
        """
        try :
            if not name :
                obj = systemcollectionparam()
                response = obj.get_resources(client, option_)
                return response
        except Exception as e :
            raise e
class systemcollectionparam_response(base_response) :
    """Response envelope for systemcollectionparam API calls.

    Fix: the original assigned ``self.systemcollectionparam = []`` and
    then immediately overwrote it with the pre-allocated list below;
    the dead store has been removed.
    """
    def __init__(self, length=1) :
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-allocate one payload object per expected result.
        self.systemcollectionparam = [systemcollectionparam() for _ in range(length)]
| {
"content_hash": "b4a9b950c3cb7e667ca187567b3438e9",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 123,
"avg_line_length": 28.685314685314687,
"alnum_prop": 0.71160409556314,
"repo_name": "mahabs/nitro",
"id": "336a235afe6197028c57336a934a56200fb7a56d",
"size": "4716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nssrc/com/citrix/netscaler/nitro/resource/config/system/systemcollectionparam.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "498"
},
{
"name": "Python",
"bytes": "10647176"
}
],
"symlink_target": ""
} |
u"""
.. module:: models
"""
import logging
import os
# pylint: disable=unused-import
import uuid
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.db.models import F
from django.utils import timezone
logger = logging.getLogger('volontulo.models')
class Organization(models.Model):
    u"""Model that handles organizations/institutions."""
    name = models.CharField(max_length=150)
    address = models.CharField(max_length=150)
    description = models.TextField()

    def __str__(self):
        u"""Organization model string representation."""
        return self.name
class OffersManager(models.Manager):
    u"""Custom manager exposing the common Offer querysets."""

    def get_active(self):
        u"""Return published offers that are still open and not finished."""
        active_filters = {
            'offer_status': 'published',
            'action_status__in': ('ongoing', 'future'),
            'recruitment_status__in': ('open', 'supplemental'),
        }
        return self.filter(**active_filters).all()

    def get_for_administrator(self):
        u"""Return all unpublished offers, for administrator review."""
        return self.filter(offer_status='unpublished').all()

    def get_weightened(self, count=10):
        u"""Return up to `count` published offers ordered by weight.

        :param count: Integer
        :return:
        """
        published = self.filter(offer_status='published')
        return published.order_by('weight')[:count]

    def get_archived(self):
        u"""Return published offers whose recruitment has closed."""
        archived_filters = {
            'offer_status': 'published',
            'action_status__in': ('ongoing', 'finished'),
            'recruitment_status': 'closed',
        }
        return self.filter(**archived_filters).all()
class Offer(models.Model):
    u"""Offer model: a volunteering opportunity published by an
    organization, with independent offer/recruitment/action statuses."""

    OFFER_STATUSES = (
        ('unpublished', u'Unpublished'),
        ('published', u'Published'),
        ('rejected', u'Rejected'),
    )
    RECRUITMENT_STATUSES = (
        ('open', u'Open'),
        ('supplemental', u'Supplemental'),
        ('closed', u'Closed'),
    )
    ACTION_STATUSES = (
        ('future', u'Future'),
        ('ongoing', u'Ongoing'),
        ('finished', u'Finished'),
    )

    objects = OffersManager()
    organization = models.ForeignKey(Organization)
    volunteers = models.ManyToManyField(User)
    description = models.TextField()
    requirements = models.TextField(blank=True, default='')
    time_commitment = models.TextField()
    benefits = models.TextField()
    location = models.CharField(max_length=150)
    title = models.CharField(max_length=150)
    started_at = models.DateTimeField(blank=True, null=True)
    finished_at = models.DateTimeField(blank=True, null=True)
    time_period = models.CharField(max_length=150, default='', blank=True)
    # NOTE(review): presumably a legacy status column (suffix `_old`) --
    # confirm before removing.
    status_old = models.CharField(
        max_length=30,
        default='NEW',
        null=True,
        unique=False
    )
    offer_status = models.CharField(
        max_length=16,
        choices=OFFER_STATUSES,
        default='unpublished',
    )
    recruitment_status = models.CharField(
        max_length=16,
        choices=RECRUITMENT_STATUSES,
        default='open',
    )
    action_status = models.CharField(
        max_length=16,
        choices=ACTION_STATUSES,
        default='ongoing',
    )
    # NOTE(review): a BooleanField named 'votes' with default=0 --
    # confirm whether an IntegerField was intended.
    votes = models.BooleanField(default=0)
    recruitment_start_date = models.DateTimeField(blank=True, null=True)
    recruitment_end_date = models.DateTimeField(blank=True, null=True)
    reserve_recruitment = models.BooleanField(blank=True, default=True)
    reserve_recruitment_start_date = models.DateTimeField(
        blank=True,
        null=True
    )
    reserve_recruitment_end_date = models.DateTimeField(
        blank=True,
        null=True
    )
    action_ongoing = models.BooleanField(default=False, blank=True)
    constant_coop = models.BooleanField(default=False, blank=True)
    action_start_date = models.DateTimeField(blank=True, null=True)
    action_end_date = models.DateTimeField(blank=True, null=True)
    volunteers_limit = models.IntegerField(default=0, null=True, blank=True)
    # Lower weight sorts first in OffersManager.get_weightened.
    weight = models.IntegerField(default=0, null=True, blank=True)

    def __str__(self):
        u"""Offer string representation."""
        return self.title

    def set_main_image(self, is_main):
        u"""Set main image flag unsetting other offers images.

        :param is_main: Boolean flag resetting offer main image
        """
        # Clearing all flags first guarantees at most one main image.
        if is_main:
            OfferImage.objects.filter(offer=self).update(is_main=False)
            return True
        return False

    def save_offer_image(self, gallery, userprofile, is_main=False):
        u"""Handle image upload for user profile page.

        :param gallery: OfferImage model instance to attach and save
        :param userprofile: UserProfile model instance
        :param is_main: Boolean main image flag
        """
        gallery.offer = self
        gallery.userprofile = userprofile
        gallery.is_main = self.set_main_image(is_main)
        gallery.save()
        return self

    def create_new(self):
        u"""Set status while creating new offer."""
        self.offer_status = 'unpublished'
        self.recruitment_status = 'open'
        # The action status can only be derived once dates are known.
        if self.started_at or self.finished_at:
            self.action_status = self.determine_action_status()

    def determine_action_status(self):
        u"""Determine action status by offer dates.

        'ongoing' if now is within [started_at, finished_at) or past
        started_at with no end date; 'future' before started_at;
        'finished' otherwise.
        """
        if (
                (
                    self.finished_at and
                    self.started_at < timezone.now() < self.finished_at
                ) or
                (
                    self.started_at < timezone.now() and
                    not self.finished_at
                )
        ):
            return 'ongoing'
        elif self.started_at > timezone.now():
            return 'future'
        else:
            return 'finished'

    def change_status(self, status):
        u"""Change offer status.

        :param status: string Offer status; silently ignored unless one
            of 'published', 'rejected' or 'unpublished'.
        """
        if status in ('published', 'rejected', 'unpublished'):
            self.offer_status = status
            self.save()
        return self

    def unpublish(self):
        u"""Unpublish offer."""
        self.offer_status = 'unpublished'
        self.save()
        return self

    def publish(self):
        u"""Publish offer.

        Every other offer's weight is incremented so that this offer
        (weight 0) sorts first in get_weightened.
        """
        self.offer_status = 'published'
        Offer.objects.all().update(weight=F('weight') + 1)
        self.weight = 0
        self.save()
        return self

    def reject(self):
        u"""Reject offer."""
        self.offer_status = 'rejected'
        self.save()
        return self

    def close_offer(self):
        u"""Change offer status to close."""
        self.offer_status = 'unpublished'
        self.action_status = 'finished'
        self.recruitment_status = 'closed'
        self.save()
        return self
class UserProfile(models.Model):
    u"""Model that handles users' profiles."""
    user = models.OneToOneField(User)
    organizations = models.ManyToManyField(
        Organization,
        related_name='userprofiles',
    )
    is_administrator = models.BooleanField(default=False, blank=True)
    phone_no = models.CharField(
        max_length=32,
        blank=True,
        default='',
        null=True
    )
    # Stable public identifier, independent of the numeric PK.
    uuid = models.UUIDField(default=uuid.uuid4, unique=True)

    def is_admin(self):
        u"""Return True if current user is administrator, else return False"""
        return self.is_administrator

    def is_volunteer(self):
        u"""Return True if current user is volunteer, else return False

        NOTE(review): `self.organizations` is a related manager, which is
        always truthy, so this reduces to `not self.is_administrator`;
        confirm whether membership should be tested with .exists().
        """
        return not (self.is_administrator and self.organizations)

    def can_edit_offer(self, offer=None, offer_id=None):
        u"""Checks if the user can edit an offer based on its ID"""
        if offer is None:
            offer = Offer.objects.get(id=offer_id)
        # Admins may edit anything; others only offers belonging to one
        # of their organizations.
        return self.is_administrator or self.organizations.filter(
            id=offer.organization_id).exists()

    def get_avatar(self):
        u"""Return the queryset of this user's avatar image(s)."""
        return UserGallery.objects.filter(
            userprofile=self,
            is_avatar=True
        )

    def clean_images(self):
        u"""Delete all of this user's gallery images, removing the files
        from disk as well; filesystem errors are logged, not raised."""
        images = UserGallery.objects.filter(userprofile=self)
        for image in images:
            try:
                os.remove(os.path.join(settings.MEDIA_ROOT, str(image.image)))
            except OSError as ex:
                logger.error(ex)
            image.delete()

    def __str__(self):
        return self.user.email
class UserGallery(models.Model):
    u"""Handling user images."""
    userprofile = models.ForeignKey(UserProfile, related_name='images')
    image = models.ImageField(upload_to='profile/')
    # True for the image used as the user's avatar.
    is_avatar = models.BooleanField(default=False)

    def __str__(self):
        u"""String representation of an image."""
        return str(self.image)
class OfferImage(models.Model):
    u"""Handling offer image."""
    userprofile = models.ForeignKey(UserProfile, related_name='offerimages')
    offer = models.ForeignKey(Offer, related_name='images')
    path = models.ImageField(upload_to='offers/')
    # At most one image per offer is main; see Offer.set_main_image.
    is_main = models.BooleanField(default=False)
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        u"""String representation of an image."""
        return str(self.path)
class OrganizationGallery(models.Model):
    u"""Handling organizations gallery."""
    organization = models.ForeignKey(Organization, related_name='images')
    published_by = models.ForeignKey(UserProfile, related_name='gallery')
    path = models.ImageField(upload_to='gallery/')
    is_main = models.BooleanField(default=False, blank=True)

    def __str__(self):
        u"""String representation of an image."""
        return str(self.path)

    def remove(self):
        u"""Remove this image from the database.

        Fix: the original body called ``self.remove()`` (itself),
        recursing infinitely; it now delegates to Model.delete().
        """
        self.delete()

    def set_as_main(self, organization):
        u"""Save image as main.

        :param organization: Organization model instance
        """
        # Clear the main flag on all of the organization's images first,
        # so at most one image is marked as main.
        OrganizationGallery.objects.filter(organization_id=organization.id)\
            .update(
                is_main=False
            )
        self.is_main = True
        self.save()

    @staticmethod
    def get_organizations_galleries(userprofile):
        u"""Get images grouped by organizations

        :param userprofile: UserProfile model instance
        """
        organizations = Organization.objects.filter(
            userprofiles=userprofile
        ).all()
        return {o.name: o.images.all() for o in organizations}
class Page(models.Model):
    """Static page model: admin-authored content with a publish flag."""
    title = models.CharField(max_length=255)
    content = models.TextField()
    author = models.ForeignKey(UserProfile)
    # Unpublished pages exist as drafts.
    published = models.BooleanField(default=False)
    modified_at = models.DateTimeField(auto_now=True)
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.title
"content_hash": "7df80001e798a975d3559b542218f07d",
"timestamp": "",
"source": "github",
"line_count": 354,
"max_line_length": 78,
"avg_line_length": 30.77683615819209,
"alnum_prop": 0.6166131252868288,
"repo_name": "stxnext-csr/volontulo",
"id": "4a29300c17703f763f97371cc70edffc1dd4d39b",
"size": "10920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/volontulo/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20135"
},
{
"name": "HTML",
"bytes": "92576"
},
{
"name": "JavaScript",
"bytes": "2444"
},
{
"name": "Python",
"bytes": "201347"
},
{
"name": "Shell",
"bytes": "1068"
}
],
"symlink_target": ""
} |
import hashlib
import io
import random
from shutil import rmtree
from tempfile import mkdtemp
from unittest import skipIf
import attr
from itemadapter import ItemAdapter
from twisted.trial import unittest
from scrapy.http import Request, Response
from scrapy.item import Field, Item
from scrapy.pipelines.images import ImagesPipeline
from scrapy.settings import Settings
from scrapy.utils.python import to_bytes
# Optional-dependency probes evaluated at import time.
try:
    from dataclasses import make_dataclass, field as dataclass_field
except ImportError:
    # Python < 3.7 without the dataclasses backport: the dataclass-based
    # test case below is skipped via @skipIf.
    make_dataclass = None
    dataclass_field = None


# Set to a reason string when image tests cannot run; consumed as the
# TestCase-level `skip` attribute.
skip = False
try:
    from PIL import Image
except ImportError:
    skip = 'Missing Python Imaging Library, install https://pypi.python.org/pypi/Pillow'
else:
    encoders = {'jpeg_encoder', 'jpeg_decoder'}
    if not encoders.issubset(set(Image.core.__dict__)):
        skip = 'Missing JPEG encoders'
def _mocked_download_func(request, info):
response = request.meta.get('response')
return response() if callable(response) else response
class ImagesPipelineTestCase(unittest.TestCase):
    """Unit tests for ImagesPipeline path generation and conversion."""

    # Skipped entirely when PIL/JPEG support is missing (see module top).
    skip = skip

    def setUp(self):
        # Fresh store directory per test; downloads are served from
        # request.meta via _mocked_download_func.
        self.tempdir = mkdtemp()
        self.pipeline = ImagesPipeline(self.tempdir, download_func=_mocked_download_func)

    def tearDown(self):
        rmtree(self.tempdir)

    def test_file_path(self):
        # Paths must be stable SHA1-of-URL names under 'full/'.
        file_path = self.pipeline.file_path
        self.assertEqual(
            file_path(Request("https://dev.mydeco.com/mydeco.gif")),
            'full/3fd165099d8e71b8a48b2683946e64dbfad8b52d.jpg')
        self.assertEqual(
            file_path(Request("http://www.maddiebrown.co.uk///catalogue-items//image_54642_12175_95307.jpg")),
            'full/0ffcd85d563bca45e2f90becd0ca737bc58a00b2.jpg')
        self.assertEqual(
            file_path(Request("https://dev.mydeco.com/two/dirs/with%20spaces%2Bsigns.gif")),
            'full/b250e3a74fff2e4703e310048a5b13eba79379d2.jpg')
        self.assertEqual(
            file_path(Request("http://www.dfsonline.co.uk/get_prod_image.php?img=status_0907_mdm.jpg")),
            'full/4507be485f38b0da8a0be9eb2e1dfab8a19223f2.jpg')
        self.assertEqual(
            file_path(Request("http://www.dorma.co.uk/images/product_details/2532/")),
            'full/97ee6f8a46cbbb418ea91502fd24176865cf39b2.jpg')
        self.assertEqual(
            file_path(Request("http://www.dorma.co.uk/images/product_details/2532")),
            'full/244e0dd7d96a3b7b01f54eded250c9e272577aa1.jpg')
        # Extra response/info arguments must not affect the path.
        self.assertEqual(
            file_path(Request("http://www.dorma.co.uk/images/product_details/2532"),
                      response=Response("http://www.dorma.co.uk/images/product_details/2532"),
                      info=object()),
            'full/244e0dd7d96a3b7b01f54eded250c9e272577aa1.jpg')

    def test_thumbnail_name(self):
        # Thumbnails live under 'thumbs/<name>/' with SHA1-of-URL names.
        thumb_path = self.pipeline.thumb_path
        name = '50'
        self.assertEqual(thumb_path(Request("file:///tmp/foo.jpg"), name),
                         'thumbs/50/38a86208c36e59d4404db9e37ce04be863ef0335.jpg')
        self.assertEqual(thumb_path(Request("file://foo.png"), name),
                         'thumbs/50/e55b765eba0ec7348e50a1df496040449071b96a.jpg')
        self.assertEqual(thumb_path(Request("file:///tmp/foo"), name),
                         'thumbs/50/0329ad83ebb8e93ea7c7906d46e9ed55f7349a50.jpg')
        self.assertEqual(thumb_path(Request("file:///tmp/some.name/foo"), name),
                         'thumbs/50/850233df65a5b83361798f532f1fc549cd13cbe9.jpg')
        self.assertEqual(thumb_path(Request("file:///tmp/some.name/foo"), name,
                                    response=Response("file:///tmp/some.name/foo"),
                                    info=object()),
                         'thumbs/50/850233df65a5b83361798f532f1fc549cd13cbe9.jpg')

    def test_convert_image(self):
        SIZE = (100, 100)
        # straightforward case: RGB and JPEG
        COLOUR = (0, 127, 255)
        im = _create_image('JPEG', 'RGB', SIZE, COLOUR)
        converted, _ = self.pipeline.convert_image(im)
        self.assertEqual(converted.mode, 'RGB')
        self.assertEqual(converted.getcolors(), [(10000, COLOUR)])

        # check that the thumbnail keeps the image ratio
        thumbnail, _ = self.pipeline.convert_image(converted, size=(10, 25))
        self.assertEqual(thumbnail.mode, 'RGB')
        self.assertEqual(thumbnail.size, (10, 10))

        # transparency case: RGBA and PNG
        COLOUR = (0, 127, 255, 50)
        im = _create_image('PNG', 'RGBA', SIZE, COLOUR)
        converted, _ = self.pipeline.convert_image(im)
        self.assertEqual(converted.mode, 'RGB')
        self.assertEqual(converted.getcolors(), [(10000, (205, 230, 255))])

        # transparency case with palette: P and PNG
        COLOUR = (0, 127, 255, 50)
        im = _create_image('PNG', 'RGBA', SIZE, COLOUR)
        im = im.convert('P')
        converted, _ = self.pipeline.convert_image(im)
        self.assertEqual(converted.mode, 'RGB')
        self.assertEqual(converted.getcolors(), [(10000, (205, 230, 255))])
class DeprecatedImagesPipeline(ImagesPipeline):
    """Pipeline overriding the legacy *_key methods, used to exercise
    backward compatibility."""

    def file_key(self, url):
        # Legacy alias: delegate to image_key.
        return self.image_key(url)

    def image_key(self, url):
        digest = hashlib.sha1(to_bytes(url)).hexdigest()
        return f'empty/{digest}.jpg'

    def thumb_key(self, url, thumb_id):
        digest = hashlib.sha1(to_bytes(url)).hexdigest()
        return f'thumbsup/{thumb_id}/{digest}.jpg'
class ImagesPipelineTestCaseFieldsMixin:
    """Shared assertions for item classes exposing default and custom
    image-field names; concrete subclasses set `item_class`."""

    def test_item_fields_default(self):
        url = 'http://www.example.com/images/1.jpg'
        item = self.item_class(name='item1', image_urls=[url])
        settings = Settings({'IMAGES_STORE': 's3://example/images/'})
        pipeline = ImagesPipeline.from_settings(settings)

        requests = list(pipeline.get_media_requests(item, None))
        self.assertEqual(requests[0].url, url)

        results = [(True, {'url': url})]
        item = pipeline.item_completed(results, item, None)
        self.assertEqual(ItemAdapter(item).get("images"), [results[0][1]])
        self.assertIsInstance(item, self.item_class)

    def test_item_fields_override_settings(self):
        url = 'http://www.example.com/images/1.jpg'
        item = self.item_class(name='item1', custom_image_urls=[url])
        settings = Settings({
            'IMAGES_STORE': 's3://example/images/',
            'IMAGES_URLS_FIELD': 'custom_image_urls',
            'IMAGES_RESULT_FIELD': 'custom_images'
        })
        pipeline = ImagesPipeline.from_settings(settings)

        requests = list(pipeline.get_media_requests(item, None))
        self.assertEqual(requests[0].url, url)

        results = [(True, {'url': url})]
        item = pipeline.item_completed(results, item, None)
        self.assertEqual(
            ItemAdapter(item).get("custom_images"), [results[0][1]])
        self.assertIsInstance(item, self.item_class)
class ImagesPipelineTestCaseFieldsDict(ImagesPipelineTestCaseFieldsMixin, unittest.TestCase):
    # Run the shared field tests with plain dicts as the item type.
    item_class = dict
class ImagesPipelineTestItem(Item):
    # Scrapy Item used by the shared field tests below.
    name = Field()
    # default fields
    image_urls = Field()
    images = Field()
    # overridden fields
    custom_image_urls = Field()
    custom_images = Field()
class ImagesPipelineTestCaseFieldsItem(ImagesPipelineTestCaseFieldsMixin, unittest.TestCase):
    # Run the shared field tests with a scrapy.Item subclass as the item type.
    item_class = ImagesPipelineTestItem
@skipIf(not make_dataclass, "dataclasses module is not available")
class ImagesPipelineTestCaseFieldsDataClass(ImagesPipelineTestCaseFieldsMixin, unittest.TestCase):
    """Run the shared field tests with a dataclass as the item type."""
    item_class = None

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if make_dataclass:
            # Built lazily so the module still imports when dataclasses is
            # unavailable (the skipIf above then skips the whole case).
            self.item_class = make_dataclass(
                # Renamed from "FilesPipelineTestDataClass": that name was
                # copy-pasted from the files-pipeline tests.
                "ImagesPipelineTestDataClass",
                [
                    ("name", str),
                    # default fields
                    ("image_urls", list, dataclass_field(default_factory=list)),
                    ("images", list, dataclass_field(default_factory=list)),
                    # overridden fields
                    ("custom_image_urls", list, dataclass_field(default_factory=list)),
                    ("custom_images", list, dataclass_field(default_factory=list)),
                ],
            )
@attr.s
class ImagesPipelineTestAttrsItem:
    """attrs-based item for the shared field tests.

    The list fields use ``factory=list`` so each instance gets a fresh list.
    The previous ``attr.ib(default=lambda: [])`` made the *lambda object
    itself* the default value instead of calling it per instance.
    """
    name = attr.ib(default="")
    # default fields
    image_urls = attr.ib(factory=list)
    images = attr.ib(factory=list)
    # overridden fields
    custom_image_urls = attr.ib(factory=list)
    custom_images = attr.ib(factory=list)
class ImagesPipelineTestCaseFieldsAttrsItem(ImagesPipelineTestCaseFieldsMixin, unittest.TestCase):
    # Run the shared field tests with an attrs class as the item type.
    item_class = ImagesPipelineTestAttrsItem
class ImagesPipelineTestCaseCustomSettings(unittest.TestCase):
    """Check how ImagesPipeline attributes are resolved from the various
    configuration sources: built-in defaults, plain IMAGES_* settings,
    subclass class attributes, and subclass-name-prefixed settings."""

    img_cls_attribute_names = [
        # Pipeline attribute names with corresponding setting names.
        ("EXPIRES", "IMAGES_EXPIRES"),
        ("MIN_WIDTH", "IMAGES_MIN_WIDTH"),
        ("MIN_HEIGHT", "IMAGES_MIN_HEIGHT"),
        ("IMAGES_URLS_FIELD", "IMAGES_URLS_FIELD"),
        ("IMAGES_RESULT_FIELD", "IMAGES_RESULT_FIELD"),
        ("THUMBS", "IMAGES_THUMBS")
    ]

    # This should match what is defined in ImagesPipeline.
    default_pipeline_settings = dict(
        MIN_WIDTH=0,
        MIN_HEIGHT=0,
        EXPIRES=90,
        THUMBS={},
        IMAGES_URLS_FIELD='image_urls',
        IMAGES_RESULT_FIELD='images'
    )

    def setUp(self):
        self.tempdir = mkdtemp()

    def tearDown(self):
        rmtree(self.tempdir)

    def _generate_fake_settings(self, prefix=None):
        """
        :param prefix: string for setting keys
        :return: dictionary of image pipeline settings
        """

        def random_string():
            # 97..122 is 'a'..'z'; random.randint is inclusive on both ends.
            # (The previous upper bound of 123 could also produce '{'.)
            return "".join([chr(random.randint(97, 122)) for _ in range(10)])

        settings = {
            "IMAGES_EXPIRES": random.randint(100, 1000),
            "IMAGES_STORE": self.tempdir,
            "IMAGES_RESULT_FIELD": random_string(),
            "IMAGES_URLS_FIELD": random_string(),
            "IMAGES_MIN_WIDTH": random.randint(1, 1000),
            "IMAGES_MIN_HEIGHT": random.randint(1, 1000),
            "IMAGES_THUMBS": {
                'small': (random.randint(1, 1000), random.randint(1, 1000)),
                'big': (random.randint(1, 1000), random.randint(1, 1000))
            }
        }
        if not prefix:
            return settings
        # IMAGES_STORE is never prefixed: it is always read under its plain name.
        return {prefix.upper() + "_" + k if k != "IMAGES_STORE" else k: v for k, v in settings.items()}

    def _generate_fake_pipeline_subclass(self):
        """
        :return: ImagesPipeline subclass with all uppercase attributes set.
        """

        class UserDefinedImagePipeline(ImagesPipeline):
            # Values should be in different range than fake_settings.
            MIN_WIDTH = random.randint(1000, 2000)
            MIN_HEIGHT = random.randint(1000, 2000)
            THUMBS = {
                'small': (random.randint(1000, 2000), random.randint(1000, 2000)),
                'big': (random.randint(1000, 2000), random.randint(1000, 2000))
            }
            EXPIRES = random.randint(1000, 2000)
            IMAGES_URLS_FIELD = "field_one"
            IMAGES_RESULT_FIELD = "field_two"

        return UserDefinedImagePipeline

    def test_different_settings_for_different_instances(self):
        """
        If there are two instances of ImagesPipeline class with different settings, they should
        have different settings.
        """
        custom_settings = self._generate_fake_settings()
        default_settings = Settings()
        default_sts_pipe = ImagesPipeline(self.tempdir, settings=default_settings)
        user_sts_pipe = ImagesPipeline.from_settings(Settings(custom_settings))
        for pipe_attr, settings_attr in self.img_cls_attribute_names:
            expected_default_value = self.default_pipeline_settings.get(pipe_attr)
            custom_value = custom_settings.get(settings_attr)
            self.assertNotEqual(expected_default_value, custom_value)
            self.assertEqual(getattr(default_sts_pipe, pipe_attr.lower()), expected_default_value)
            self.assertEqual(getattr(user_sts_pipe, pipe_attr.lower()), custom_value)

    def test_subclass_attrs_preserved_default_settings(self):
        """
        If image settings are not defined at all subclass of ImagePipeline takes values
        from class attributes.
        """
        pipeline_cls = self._generate_fake_pipeline_subclass()
        pipeline = pipeline_cls.from_settings(Settings({"IMAGES_STORE": self.tempdir}))
        for pipe_attr, settings_attr in self.img_cls_attribute_names:
            # Instance attribute (lowercase) must be equal to class attribute (uppercase).
            attr_value = getattr(pipeline, pipe_attr.lower())
            self.assertNotEqual(attr_value, self.default_pipeline_settings[pipe_attr])
            self.assertEqual(attr_value, getattr(pipeline, pipe_attr))

    def test_subclass_attrs_preserved_custom_settings(self):
        """
        If image settings are defined but they are not defined for subclass default
        values taken from settings should be preserved.
        """
        pipeline_cls = self._generate_fake_pipeline_subclass()
        settings = self._generate_fake_settings()
        pipeline = pipeline_cls.from_settings(Settings(settings))
        for pipe_attr, settings_attr in self.img_cls_attribute_names:
            # Instance attribute (lowercase) must be equal to
            # value defined in settings.
            value = getattr(pipeline, pipe_attr.lower())
            self.assertNotEqual(value, self.default_pipeline_settings[pipe_attr])
            settings_value = settings.get(settings_attr)
            self.assertEqual(value, settings_value)

    def test_no_custom_settings_for_subclasses(self):
        """
        If there are no settings for subclass and no subclass attributes, pipeline should use
        attributes of base class.
        """
        class UserDefinedImagePipeline(ImagesPipeline):
            pass

        user_pipeline = UserDefinedImagePipeline.from_settings(Settings({"IMAGES_STORE": self.tempdir}))
        for pipe_attr, settings_attr in self.img_cls_attribute_names:
            # Values from settings for custom pipeline should be set on pipeline instance.
            custom_value = self.default_pipeline_settings.get(pipe_attr.upper())
            self.assertEqual(getattr(user_pipeline, pipe_attr.lower()), custom_value)

    def test_custom_settings_for_subclasses(self):
        """
        If there are custom settings for subclass and NO class attributes, pipeline should use custom
        settings.
        """
        class UserDefinedImagePipeline(ImagesPipeline):
            pass

        prefix = UserDefinedImagePipeline.__name__.upper()
        settings = self._generate_fake_settings(prefix=prefix)
        user_pipeline = UserDefinedImagePipeline.from_settings(Settings(settings))
        for pipe_attr, settings_attr in self.img_cls_attribute_names:
            # Values from settings for custom pipeline should be set on pipeline instance.
            custom_value = settings.get(prefix + "_" + settings_attr)
            self.assertNotEqual(custom_value, self.default_pipeline_settings[pipe_attr])
            self.assertEqual(getattr(user_pipeline, pipe_attr.lower()), custom_value)

    def test_custom_settings_and_class_attrs_for_subclasses(self):
        """
        If there are custom settings for subclass AND class attributes
        setting keys are preferred and override attributes.
        """
        pipeline_cls = self._generate_fake_pipeline_subclass()
        prefix = pipeline_cls.__name__.upper()
        settings = self._generate_fake_settings(prefix=prefix)
        user_pipeline = pipeline_cls.from_settings(Settings(settings))
        for pipe_attr, settings_attr in self.img_cls_attribute_names:
            custom_value = settings.get(prefix + "_" + settings_attr)
            self.assertNotEqual(custom_value, self.default_pipeline_settings[pipe_attr])
            self.assertEqual(getattr(user_pipeline, pipe_attr.lower()), custom_value)

    def test_cls_attrs_with_DEFAULT_prefix(self):
        # DEFAULT_-prefixed class attributes feed the lowercase instance attrs.
        class UserDefinedImagePipeline(ImagesPipeline):
            DEFAULT_IMAGES_URLS_FIELD = "something"
            DEFAULT_IMAGES_RESULT_FIELD = "something_else"

        pipeline = UserDefinedImagePipeline.from_settings(Settings({"IMAGES_STORE": self.tempdir}))
        self.assertEqual(pipeline.images_result_field, "something_else")
        self.assertEqual(pipeline.images_urls_field, "something")

    def test_user_defined_subclass_default_key_names(self):
        """Test situation when user defines subclass of ImagePipeline,
        but uses attribute names for default pipeline (without prefixing
        them with pipeline class name).
        """
        settings = self._generate_fake_settings()

        class UserPipe(ImagesPipeline):
            pass

        pipeline_cls = UserPipe.from_settings(Settings(settings))
        for pipe_attr, settings_attr in self.img_cls_attribute_names:
            expected_value = settings.get(settings_attr)
            self.assertEqual(getattr(pipeline_cls, pipe_attr.lower()),
                             expected_value)
def _create_image(format, *a, **kw):
    """Build a PIL image in memory, round-tripped through *format* encoding."""
    raw = io.BytesIO()
    Image.new(*a, **kw).save(raw, format)
    raw.seek(0)
    # The buffer must stay open: PIL reads from it lazily.
    return Image.open(raw)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| {
"content_hash": "133aa0b111d321e04f8c0d58e01493fb",
"timestamp": "",
"source": "github",
"line_count": 418,
"max_line_length": 110,
"avg_line_length": 41.791866028708135,
"alnum_prop": 0.6405060392695632,
"repo_name": "dangra/scrapy",
"id": "ad138a2dc5588cbb956c7e4c373ed7cecc2ca1b2",
"size": "17469",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_pipeline_images.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2790"
},
{
"name": "Python",
"bytes": "1670720"
},
{
"name": "Roff",
"bytes": "2010"
},
{
"name": "Shell",
"bytes": "259"
}
],
"symlink_target": ""
} |
import StringIO
import csv
import logging
import os
import requests
import sys
from base64 import b64decode
def create_config_files(directory):
    """
    Initialize directory ready for vpn walker

    :param directory: the path where you want this to happen
    :return: None.  Writes one <ip>.ovpn file per VPN Gate endpoint plus a
        servers.txt index mapping each IP to its two-letter country code.
    """
    # NOTE(review): Python 2 code (StringIO module, reader.next(), callers use
    # the print statement).  Performs a network request to the VPN Gate API.
    # Some constant strings
    vpn_gate_url = "http://www.vpngate.net/api/iphone/"
    if not os.path.exists(directory):
        os.makedirs(directory)
    # get csv into memory
    csv_str = ""
    logging.info("Downloading info from VPN Gate API...")
    r = requests.get(vpn_gate_url)
    for line in r.text.split('\n'):
        csv_str += line.encode('utf-8')
        csv_str += "\n"
    # convert csv string to string IO
    f = StringIO.StringIO(csv_str)
    # generate vpn dict
    vpn_dict = {}
    reader = csv.reader(f)
    # Discard the first two rows of the feed before parsing data rows.
    reader.next()
    reader.next()
    for row in reader:
        # A usable data row has exactly 15 fields; the last field is the
        # base64-encoded OpenVPN config, field 6 the alpha-2 country code.
        if len(row) == 15:
            alpha2 = row[6]
            vpn_dict[alpha2] = vpn_dict.get(alpha2, [])
            vpn_dict[alpha2].append({
                "vpn_name": row[0],
                "ip": row[1],
                "country_name": row[5],
                "alpha2": alpha2,
                "openvpn_config": b64decode(row[-1])
            })
    f.close()
    server_country = {}
    # write config files
    for country in vpn_dict:
        for data in vpn_dict[country]:
            config_filename = "{}.ovpn".format(data['ip'])
            file_path = os.path.join(directory, config_filename)
            with open(file_path, 'w') as f:
                f.write(data['openvpn_config'])
                # Route DNS through the tunnel via openvpn's resolvconf hooks.
                f.write("up /etc/openvpn/update-resolv-conf\n")
                f.write("down /etc/openvpn/update-resolv-conf\n")
            server_country[data['ip']] = country
    # ip|country index consumed by the walker.
    with open(os.path.join(directory, 'servers.txt'), 'w') as f:
        for ip in server_country:
            f.write('|'.join([ip, server_country[ip]]) + '\n')
if __name__ == "__main__":
    # Expect exactly one argument: the directory to populate with configs.
    if len(sys.argv) != 2:
        print "Usage {0} <directory to create VPNs in>".format(sys.argv[0])  # Python 2 print statement
        sys.exit(1)
    create_config_files(sys.argv[1])
| {
"content_hash": "e98ad55b20f9d728cd571c7a59505976",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 75,
"avg_line_length": 28.56756756756757,
"alnum_prop": 0.5581835383159887,
"repo_name": "rpanah/centinel",
"id": "2f76d2525510577b54d773d74c668eaafce7b1ee",
"size": "2114",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "centinel/vpn/vpngate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "245"
},
{
"name": "Python",
"bytes": "258554"
},
{
"name": "Shell",
"bytes": "8915"
}
],
"symlink_target": ""
} |
""" command line utility for working with FragmentCatalogs (CASE-type analysis)
**Usage**
BuildFragmentCatalog [optional args] <filename>
filename, the name of a delimited text file containing InData, is required
for some modes of operation (see below)
**Command Line Arguments**
 - -n *maxNumMols*: specify the maximum number of molecules to be processed
 - --build: build the catalog and OnBitLists
 *requires InData*
 - --score: score compounds
 *requires InData and a Catalog, can use OnBitLists*
 - --gains: calculate info gains
 *requires Scores*
 - --details: show details about high-ranking fragments
 *requires a Catalog and Gains*
 - --catalog=*filename*: filename with the pickled catalog.
 If --build is provided, this file will be overwritten.
 - --onbits=*filename*: filename to hold the pickled OnBitLists.
 If --build is provided, this file will be overwritten
 - --scoresFile=*filename*: filename to hold the text score data.
 If --score is provided, this file will be overwritten
 - --gainsFile=*filename*: filename to hold the text gains data.
 If --gains is provided, this file will be overwritten
 - --detailsFile=*filename*: filename to hold the text details data.
 If --details is provided, this file will be overwritten.
- --minPath=2: specify the minimum length for a path
- --maxPath=6: specify the maximum length for a path
- --smiCol=1: specify which column in the input data file contains
SMILES
- --actCol=-1: specify which column in the input data file contains
activities
- --nActs=2: specify the number of possible activity values
- --nBits=-1: specify the maximum number of bits to show details for
"""
import os
import sys
import numpy
from rdkit import RDConfig
from rdkit.Chem import FragmentCatalog
from rdkit.Dbase.DbConnection import DbConnect
from rdkit.ML import InfoTheory
import pickle
def message(msg, dest=sys.stdout):
  """Emit a status string to *dest* (default: stdout) without adding a newline."""
  print(msg, end='', file=dest)
def BuildCatalog(suppl, maxPts=-1, groupFileName=None, minPath=2, maxPath=6, reportFreq=10):
  """Build a FragmentCatalog from the molecules in *suppl*.

  **Arguments**

    - suppl: a mol supplier

    - maxPts: (optional) upper bound on the number of molecules to consider

    - groupFileName: (optional) file with functional-group definitions;
      defaults to the RDKit-distributed FunctionalGroups.txt

    - minPath, maxPath: (optional) bounds on the path lengths considered

    - reportFreq: (optional) how often to display status information

  **Returns**

    a FragmentCatalog

  """
  if groupFileName is None:
    groupFileName = os.path.join(RDConfig.RDDataDir, "FunctionalGroups.txt")
  catParams = FragmentCatalog.FragCatParams(minPath, maxPath, groupFileName)
  catalog = FragmentCatalog.FragCatalog(catParams)
  generator = FragmentCatalog.FragCatGenerator()
  # Molecule limit: explicit maxPts wins, then len(suppl) when available,
  # otherwise -1 (never matched by the enumerate index -> unbounded).
  if maxPts > 0:
    nPts = maxPts
  elif hasattr(suppl, '__len__'):
    nPts = len(suppl)
  else:
    nPts = -1
  for i, mol in enumerate(suppl):
    if i == nPts:
      break
    if i and not i % reportFreq:
      if nPts > -1:
        message('Done %d of %d, %d paths\n' % (i, nPts, catalog.GetFPLength()))
      else:
        message('Done %d, %d paths\n' % (i, catalog.GetFPLength()))
    generator.AddFragsFromMol(mol, catalog)
  return catalog
def ScoreMolecules(suppl, catalog, maxPts=-1, actName='', acts=None, nActs=2, reportFreq=10):
  """ scores the compounds in a supplier using a catalog

  **Arguments**

    - suppl: a mol supplier

    - catalog: the FragmentCatalog

    - maxPts: (optional) the maximum number of molecules to be considered
      (NOTE(review): currently accepted for interface compatibility but not
      enforced here -- confirm against callers)

    - actName: (optional) the name of the molecule's activity property.
      If this is not provided, the molecule's last property will be used.

    - acts: (optional) a sequence of activity values (integers).
      If not provided, the activities will be read from the molecules.

    - nActs: (optional) number of possible activity values

    - reportFreq: (optional) how often to display status information

  **Returns**

    a 2-tuple:

      1) the results table (a 3D array of ints nBits x 2 x nActs)

      2) a list containing the on bit lists for each molecule

  """
  nBits = catalog.GetFPLength()
  # numpy.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
  # int is the documented replacement and yields the same default dtype.
  resTbl = numpy.zeros((nBits, 2, nActs), int)
  obls = []
  if not actName and not acts:
    actName = suppl[0].GetPropNames()[-1]

  fpgen = FragmentCatalog.FragFPGenerator()
  suppl.reset()
  i = 1
  for mol in suppl:
    if i and not i % reportFreq:
      message('Done %d.\n' % (i))
    if mol:
      if not acts:
        act = int(mol.GetProp(actName))
      else:
        act = acts[i - 1]
      fp = fpgen.GetFPForMol(mol, catalog)
      obls.append(list(fp.GetOnBits()))
      # Layer 0 counts "bit off", layer 1 "bit on": start every bit as off
      # for this molecule, then move its on bits over.
      resTbl[:, 0, act] += 1
      for id_ in obls[i - 1]:
        resTbl[id_ - 1, 0, act] -= 1
        resTbl[id_ - 1, 1, act] += 1
    else:
      # Unreadable molecule: keep list positions aligned with the supplier.
      obls.append([])
    i += 1
  return resTbl, obls
def ScoreFromLists(bitLists, suppl, catalog, maxPts=-1, actName='', acts=None, nActs=2,
                   reportFreq=10):
  """ similar to _ScoreMolecules()_, but uses pre-calculated bit lists
  for the molecules (this speeds things up a lot)

  **Arguments**

    - bitLists: sequence of on bit sequences for the input molecules
      (bit ids are 1-based, as produced by _ScoreMolecules()_)

    - suppl: the input supplier (we read activities from here)

    - catalog: the FragmentCatalog

    - maxPts: (optional) the maximum number of molecules to be considered

    - actName: (optional) the name of the molecule's activity property.
      If this is not provided, the molecule's last property will be used.

    - acts: (optional) a sequence of activity values (integers)

    - nActs: (optional) number of possible activity values

    - reportFreq: (optional) how often to display status information

  **Returns**

    the results table (a 3D array of ints nBits x 2 x nActs)

  """
  nBits = catalog.GetFPLength()
  nPts = maxPts if maxPts > 0 else len(bitLists)
  # numpy.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
  # int is the documented replacement and yields the same default dtype.
  resTbl = numpy.zeros((nBits, 2, nActs), int)
  if not actName and not acts:
    actName = suppl[0].GetPropNames()[-1]
  suppl.reset()
  for i in range(1, nPts + 1):
    mol = next(suppl)
    if not acts:
      act = int(mol.GetProp(actName))
    else:
      act = acts[i - 1]
    if i and not i % reportFreq:
      message('Done %d of %d\n' % (i, nPts))
    # Convert the 1-based bit ids to 0-based row indices.
    ids = {id_ - 1 for id_ in bitLists[i - 1]}
    # Layer 0 counts "bit off", layer 1 "bit on": start every bit as off
    # for this molecule, then move its on bits over.
    resTbl[:, 0, act] += 1
    for id_ in ids:
      resTbl[id_, 0, act] -= 1
      resTbl[id_, 1, act] += 1
  return resTbl
def CalcGains(suppl, catalog, topN=-1, actName='', acts=None, nActs=2, reportFreq=10, biasList=None,
              collectFps=0):
  """Rank catalog bits by information gain, generating fingerprints on the fly.

  Activities come from *acts* when given, otherwise from each molecule's
  *actName* property (defaulting to the first molecule's last property).
  When *collectFps* is true the generated fingerprints are returned as well.

  Returns a 2-tuple:

    1) gains matrix (from InfoBitRanker.GetTopN())

    2) list of fingerprints (empty unless *collectFps*)

  """
  nBits = catalog.GetFPLength()
  if topN < 0:
    topN = nBits
  if not actName and not acts:
    actName = suppl[0].GetPropNames()[-1]
  nMols = len(suppl) if hasattr(suppl, '__len__') else -1
  fpgen = FragmentCatalog.FragFPGenerator()
  if biasList:
    ranker = InfoTheory.InfoBitRanker(nBits, nActs, InfoTheory.InfoType.BIASENTROPY)
    ranker.SetBiasList(biasList)
  else:
    ranker = InfoTheory.InfoBitRanker(nBits, nActs, InfoTheory.InfoType.ENTROPY)
  fps = []
  for i, mol in enumerate(suppl):
    if acts:
      act = acts[i]
    else:
      try:
        act = int(mol.GetProp(actName))
      except KeyError:
        message('ERROR: Molecule has no property: %s\n' % (actName))
        message('\tAvailable properties are: %s\n' % (str(mol.GetPropNames())))
        raise KeyError(actName)
    if i and not i % reportFreq:
      if nMols > 0:
        message('Done %d of %d.\n' % (i, nMols))
      else:
        message('Done %d.\n' % (i))
    fp = fpgen.GetFPForMol(mol, catalog)
    ranker.AccumulateVotes(fp, act)
    if collectFps:
      fps.append(fp)
  gains = ranker.GetTopN(topN)
  return gains, fps
def CalcGainsFromFps(suppl, fps, topN=-1, actName='', acts=None, nActs=2, reportFreq=10,
                     biasList=None):
  """Rank bits by information gain using pre-computed fingerprints.

  Like CalcGains(), but takes the fingerprints in *fps* instead of
  regenerating them.  Returns the gains matrix from InfoBitRanker.GetTopN().

  """
  nBits = len(fps[0])
  if topN < 0:
    topN = nBits
  if not actName and not acts:
    actName = suppl[0].GetPropNames()[-1]
  nMols = len(suppl) if hasattr(suppl, '__len__') else -1
  if biasList:
    ranker = InfoTheory.InfoBitRanker(nBits, nActs, InfoTheory.InfoType.BIASENTROPY)
    ranker.SetBiasList(biasList)
  else:
    ranker = InfoTheory.InfoBitRanker(nBits, nActs, InfoTheory.InfoType.ENTROPY)
  for i, mol in enumerate(suppl):
    if acts:
      act = acts[i]
    else:
      try:
        act = int(mol.GetProp(actName))
      except KeyError:
        message('ERROR: Molecule has no property: %s\n' % (actName))
        message('\tAvailable properties are: %s\n' % (str(mol.GetPropNames())))
        raise KeyError(actName)
    if i and not i % reportFreq:
      if nMols > 0:
        message('Done %d of %d.\n' % (i, nMols))
      else:
        message('Done %d.\n' % (i))
    ranker.AccumulateVotes(fps[i], act)
  return ranker.GetTopN(topN)
def OutputGainsData(outF, gains, cat, nActs=2):
  """Write the gains table to *outF* as CSV, one row per bit.

  When *cat* is provided each row also carries the catalog's description
  of the bit; otherwise only id, gain and per-activity counts are written.
  """
  actHeaders = ['Act-%d' % (x) for x in range(nActs)]
  if cat:
    outF.write('id,Description,Gain,%s\n' % (','.join(actHeaders)))
  else:
    outF.write('id,Gain,%s\n' % (','.join(actHeaders)))
  for entry in gains:
    bitId = int(entry[0])
    row = [str(bitId)]
    if cat:
      row.append(cat.GetBitDescription(bitId))
    row.append('%.6f' % entry[1])
    row.extend('%d' % x for x in entry[2:])
    outF.write(','.join(row) + '\n')
def ProcessGainsData(inF, delim=',', idCol=0, gainCol=1):
  """Read (id, gain) pairs back out of a delimited gains file.

  The first line is treated as a header and discarded; ids are returned as
  strings, gains as floats.
  """
  inF.readline()  # discard the header row
  return [(fields[idCol], float(fields[gainCol]))
          for fields in (line.strip().split(delim) for line in inF)]
def ShowDetails(catalog, gains, nToDo=-1, outF=sys.stdout, idCol=0, gainCol=1, outDelim=','):
  """Write id/description/gain rows for the top *nToDo* entries of *gains*.

  *gains* should be a sequence of sequences whose idCol entry is a catalog
  ID (the output of _ProcessGainsData()_ is suitable input).  Entries for
  which the catalog has no description are skipped.
  """
  if nToDo < 0:
    nToDo = len(gains)
  for i in range(nToDo):
    entry = gains[i]
    bitId = int(entry[idCol])
    gain = float(entry[gainCol])
    descr = catalog.GetFragDescription(bitId)
    if not descr:
      continue
    outF.write('%s\n' % (outDelim.join((str(bitId), descr, str(gain)))))
def SupplierFromDetails(details):
  """Build a molecule supplier from a RunDetails instance.

  Reads from the database named in details.dbName when set, otherwise from
  the text file details.inFileName.  Integer actCol/nameCol indices are
  normalised into property names so downstream code can always use names.
  The supplier is returned reset to its first molecule.
  """
  from rdkit.VLib.NodeLib.DbMolSupply import DbMolSupplyNode
  from rdkit.VLib.NodeLib.SmilesSupply import SmilesSupplyNode

  if details.dbName:
    conn = DbConnect(details.dbName, details.tableName)
    suppl = DbMolSupplyNode(conn.GetData())
  else:
    suppl = SmilesSupplyNode(details.inFileName, delim=details.delim, nameColumn=details.nameCol,
                             smilesColumn=details.smiCol, titleLine=details.hasTitle)

  # Translate integer column indices into property names.  (The original
  # code performed this block twice; the second pass was a no-op duplicate
  # because the attributes had already become strings.)
  if isinstance(details.actCol, int):
    suppl.reset()
    m = next(suppl)
    details.actCol = m.GetPropNames()[details.actCol]
  if isinstance(details.nameCol, int):
    suppl.reset()
    m = next(suppl)
    details.nameCol = m.GetPropNames()[details.nameCol]
  suppl.reset()
  return suppl
def Usage():
  """Emit a minimal usage notice and terminate with exit status -1."""
  for line in ("This is BuildFragmentCatalog", 'usage error'):
    print(line)
  # Full option help lives in the module docstring:
  # print(__doc__)
  sys.exit(-1)
class RunDetails(object):
  """Holder for this script's command-line options.

  ParseArgs() populates an instance from sys.argv; the __main__ block then
  reads these attributes to decide which processing stages to run.
  """
  numMols = -1        # max number of molecules to process (-1: no limit)
  doBuild = 0         # --build: construct catalog + on-bit lists
  doSigs = 0          # --sigs: force fingerprint (re)generation
  doScore = 0         # --score: score compounds against the catalog
  doGains = 0         # --gains: compute info gains
  doDetails = 0       # --details: dump details for high-ranking bits
  catalogName = None  # --catalog: pickled catalog filename
  onBitsName = None   # --onbits: pickled on-bit lists filename
  scoresName = None   # --scoresFile: pickled scores filename
  gainsName = None    # --gainsFile: text gains filename
  dbName = ''         # -d: database to read molecules from
  tableName = None    # table within dbName (first positional argument)
  detailsName = None  # --detailsFile: text details filename
  inFileName = None   # input text file (first positional argument)
  fpName = None       # --fpFile: pickled fingerprints filename
  minPath = 2         # --minPath: minimum fragment path length
  maxPath = 6         # --maxPath: maximum fragment path length
  smiCol = 1          # --smiCol: SMILES column (index or name)
  actCol = -1         # --actCol: activity column (index or name)
  nameCol = -1        # --nameCol: name column (index or name)
  hasTitle = 1        # cleared by --noTitle
  nActs = 2           # --nActs: number of possible activity values
  nBits = -1          # --nBits: bits to show details for (-1: all)
  delim = ','         # field delimiter (-c ',', -s ' ', -t tab)
  biasList = None     # --biasList: activity classes to bias the ranker to
  topN = -1           # --topN: number of top-ranked bits to keep (-1: all)
topN = -1
def ParseArgs(details):
  """Populate *details* (a RunDetails instance) from sys.argv.

  Exits via Usage() on any malformed argument.  Exactly one positional
  argument is consumed: a table name when -d was given, otherwise the
  input file name.
  """
  import getopt
  try:
    # 'h' added to the short-option string: the '-h' branch below was
    # previously unreachable because getopt rejected the flag outright.
    args, extras = getopt.getopt(sys.argv[1:], 'n:d:csth',
                                 ['catalog=', 'onbits=', 'scoresFile=', 'gainsFile=',
                                  'detailsFile=', 'fpFile=', 'minPath=', 'maxPath=', 'smiCol=',
                                  'actCol=', 'nameCol=', 'nActs=', 'nBits=', 'biasList=', 'topN=',
                                  'build', 'sigs', 'gains', 'details', 'score', 'noTitle'])
  except Exception:
    sys.stderr.write('Error parsing command line:\n')
    import traceback
    traceback.print_exc()
    Usage()
  for arg, val in args:
    if arg == '-n':
      details.numMols = int(val)
    elif arg == '-c':
      details.delim = ','
    elif arg == '-s':
      details.delim = ' '
    elif arg == '-t':
      details.delim = '\t'
    elif arg == '-d':
      details.dbName = val
    elif arg == '--build':
      details.doBuild = 1
    elif arg == '--score':
      details.doScore = 1
    elif arg == '--gains':
      details.doGains = 1
    elif arg == '--sigs':
      details.doSigs = 1
    elif arg == '--details':
      # getopt returns long options with their '--' prefix; the original
      # compared against '-details', so --details always fell through to
      # Usage() and exited.
      details.doDetails = 1
    elif arg == '--catalog':
      details.catalogName = val
    elif arg == '--onbits':
      details.onBitsName = val
    elif arg == '--scoresFile':
      details.scoresName = val
    elif arg == '--gainsFile':
      details.gainsName = val
    elif arg == '--detailsFile':
      details.detailsName = val
    elif arg == '--fpFile':
      details.fpName = val
    elif arg == '--minPath':
      details.minPath = int(val)
    elif arg == '--maxPath':
      details.maxPath = int(val)
    elif arg == '--smiCol':
      try:
        details.smiCol = int(val)
      except ValueError:
        details.smiCol = val
    elif arg == '--actCol':
      try:
        details.actCol = int(val)
      except ValueError:
        details.actCol = val
    elif arg == '--nameCol':
      try:
        details.nameCol = int(val)
      except ValueError:
        details.nameCol = val
    elif arg == '--nActs':
      details.nActs = int(val)
    elif arg == '--nBits':
      details.nBits = int(val)
    elif arg == '--noTitle':
      details.hasTitle = 0
    elif arg == '--biasList':
      # NOTE: eval() on a command-line value; only run this script with
      # trusted arguments.
      details.biasList = tuple(eval(val))
    elif arg == '--topN':
      details.topN = int(val)
    elif arg == '-h':
      Usage()
      sys.exit(0)
    else:
      Usage()
  if len(extras):
    if details.dbName:
      details.tableName = extras[0]
    else:
      details.inFileName = extras[0]
  else:
    Usage()
if __name__ == '__main__':
  import time

  details = RunDetails()
  ParseArgs(details)

  from io import StringIO
  suppl = SupplierFromDetails(details)

  cat = None
  obls = None
  # Stage 1: build (or load) the fragment catalog.
  if details.doBuild:
    if not suppl:
      message("We require inData to generate a catalog\n")
      sys.exit(-2)
    message("Building catalog\n")
    t1 = time.time()
    cat = BuildCatalog(suppl, maxPts=details.numMols, minPath=details.minPath,
                       maxPath=details.maxPath)
    t2 = time.time()
    message("\tThat took %.2f seconds.\n" % (t2 - t1))
    if details.catalogName:
      message("Dumping catalog data\n")
      with open(details.catalogName, 'wb+') as catF:
        pickle.dump(cat, catF)
  elif details.catalogName:
    message("Loading catalog\n")
    with open(details.catalogName, 'rb') as catF:
      cat = pickle.load(catF)

  # Optionally reuse previously pickled on-bit lists.
  if details.onBitsName:
    try:
      with open(details.onBitsName, 'rb') as oblF:
        obls = pickle.load(oblF)
    except Exception:
      obls = None
    # The original code additionally discarded obls when shorter than the
    # input, but did so via an undefined name (inD) and raised a NameError
    # whenever the load succeeded.  The length check before scoring below
    # (len(obls) < details.numMols) already guards against stale lists.

  scores = None
  # Stage 2: score the molecules against the catalog.
  if details.doScore:
    if not suppl:
      message("We require inData to score molecules\n")
      sys.exit(-2)
    if not cat:
      message("We require a catalog to score molecules\n")
      sys.exit(-2)
    message("Scoring compounds\n")
    if not obls or len(obls) < details.numMols:
      scores, obls = ScoreMolecules(suppl, cat, maxPts=details.numMols, actName=details.actCol,
                                    nActs=details.nActs)
      if details.scoresName:
        with open(details.scoresName, 'wb+') as scoreF:
          pickle.dump(scores, scoreF)
      if details.onBitsName:
        with open(details.onBitsName, 'wb+') as oblF:
          pickle.dump(obls, oblF)
    else:
      # Cached bit lists are usable: much faster than regenerating FPs.
      scores = ScoreFromLists(obls, suppl, cat, maxPts=details.numMols, actName=details.actCol,
                              nActs=details.nActs)
  elif details.scoresName:
    scores = pickle.load(open(details.scoresName, 'rb'))

  # Reuse fingerprints from disk unless --sigs forces regeneration.
  if details.fpName and os.path.exists(details.fpName) and not details.doSigs:
    message("Reading fingerprints from file.\n")
    with open(details.fpName, 'rb') as fpF:
      fps = pickle.load(fpF)
  else:
    fps = []

  gains = None
  # Stage 3: compute (or load) the info-gain ranking.
  if details.doGains:
    if not suppl:
      message("We require inData to calculate gains\n")
      sys.exit(-2)
    if not (cat or fps):
      message("We require either a catalog or fingerprints to calculate gains\n")
      sys.exit(-2)
    message("Calculating Gains\n")
    t1 = time.time()
    collectFps = 1 if details.fpName else 0
    if not fps:
      gains, fps = CalcGains(suppl, cat, topN=details.topN, actName=details.actCol,
                             nActs=details.nActs, biasList=details.biasList,
                             collectFps=collectFps)
      if details.fpName:
        message("Writing fingerprint file.\n")
        with open(details.fpName, 'wb+') as tmpF:
          pickle.dump(fps, tmpF, 1)
    else:
      gains = CalcGainsFromFps(suppl, fps, topN=details.topN, actName=details.actCol,
                               nActs=details.nActs, biasList=details.biasList)
    t2 = time.time()
    message("\tThat took %.2f seconds.\n" % (t2 - t1))
    if details.gainsName:
      with open(details.gainsName, 'w+') as outF:
        OutputGainsData(outF, gains, cat, nActs=details.nActs)
  else:
    if details.gainsName:
      with open(details.gainsName, 'r') as inF:
        gains = ProcessGainsData(inF)

  # Stage 4: dump human-readable details for the top-ranked bits.
  if details.doDetails:
    if not cat:
      message("We require a catalog to get details\n")
      sys.exit(-2)
    if not gains:
      message("We require gains data to get details\n")
      sys.exit(-2)
    io = StringIO()
    io.write('id,SMILES,gain\n')
    ShowDetails(cat, gains, nToDo=details.nBits, outF=io)
    if details.detailsName:
      with open(details.detailsName, 'w+') as detF:
        detF.write(io.getvalue())
    else:
      sys.stderr.write(io.getvalue())
| {
"content_hash": "3eed547cf9ea4e04c78a753c39e22dcc",
"timestamp": "",
"source": "github",
"line_count": 653,
"max_line_length": 105,
"avg_line_length": 31.67228177641654,
"alnum_prop": 0.5625181317087322,
"repo_name": "ptosco/rdkit",
"id": "b1925d93279ad103319169cb82870986d88a43c9",
"size": "20973",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "rdkit/Chem/BuildFragmentCatalog.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1595174"
},
{
"name": "C#",
"bytes": "10167"
},
{
"name": "C++",
"bytes": "13851292"
},
{
"name": "CMake",
"bytes": "761863"
},
{
"name": "Dockerfile",
"bytes": "2590"
},
{
"name": "Fortran",
"bytes": "7590"
},
{
"name": "HTML",
"bytes": "43059702"
},
{
"name": "Java",
"bytes": "369457"
},
{
"name": "JavaScript",
"bytes": "54009"
},
{
"name": "Jupyter Notebook",
"bytes": "498341"
},
{
"name": "LLVM",
"bytes": "40048"
},
{
"name": "Lex",
"bytes": "4508"
},
{
"name": "Makefile",
"bytes": "10862"
},
{
"name": "Python",
"bytes": "4157348"
},
{
"name": "QMake",
"bytes": "389"
},
{
"name": "SMT",
"bytes": "3010"
},
{
"name": "SWIG",
"bytes": "342569"
},
{
"name": "Shell",
"bytes": "3822"
},
{
"name": "Smarty",
"bytes": "5864"
},
{
"name": "Yacc",
"bytes": "61677"
}
],
"symlink_target": ""
} |
from django.contrib.gis import admin
from .models import CollectionResource
admin.site.unregister(CollectionResource)
| {
"content_hash": "c56e930f6d160dd37603bf9e63f405a3",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 41,
"avg_line_length": 29.75,
"alnum_prop": 0.8571428571428571,
"repo_name": "hydroshare/hydroshare",
"id": "7e0c1edb870f00215b4ac45200732cc262bf3260",
"size": "119",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "hs_collection_resource/admin.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "183727"
},
{
"name": "Dockerfile",
"bytes": "1433"
},
{
"name": "HTML",
"bytes": "950010"
},
{
"name": "JavaScript",
"bytes": "1450537"
},
{
"name": "Python",
"bytes": "5786593"
},
{
"name": "R",
"bytes": "4904"
},
{
"name": "Shell",
"bytes": "94173"
},
{
"name": "Vue",
"bytes": "32043"
}
],
"symlink_target": ""
} |
"""
RoleGroup module
"""
from ovs.dal.dataobject import DataObject
from ovs.dal.structures import Relation
from ovs.dal.hybrids.role import Role
from ovs.dal.hybrids.group import Group
class RoleGroup(DataObject):
    """
    Junction (many-to-many) object linking a Role to a Group.
    Each instance holds one role/group pair; the reverse sides are exposed
    as 'groups' on Role and 'roles' on Group.
    """
    __properties = []
    __relations = [
        Relation('role', Role, 'groups'),
        Relation('group', Group, 'roles'),
    ]
    __dynamics = []
| {
"content_hash": "91157f336dac4ca519f46ce4c2b121e1",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 77,
"avg_line_length": 27,
"alnum_prop": 0.6688453159041394,
"repo_name": "mflu/openvstorage_centos",
"id": "be0266cb52e7bd9a64aeab93bb82ad72d6853e1b",
"size": "1040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ovs/dal/hybrids/j_rolegroup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "10475"
},
{
"name": "JavaScript",
"bytes": "698676"
},
{
"name": "Makefile",
"bytes": "1269"
},
{
"name": "Python",
"bytes": "1500612"
},
{
"name": "Shell",
"bytes": "16586"
}
],
"symlink_target": ""
} |
"""
The ``wind_farm`` module contains the class WindFarm that implements
a wind farm in the windpowerlib and functions needed for the modelling of a
wind farm.
SPDX-FileCopyrightText: 2019 oemof developer group <contact@oemof.org>
SPDX-License-Identifier: MIT
"""
from windpowerlib import tools, power_curves, WindTurbine
import numpy as np
import pandas as pd
import warnings
class WindFarm(object):
    r"""
    Defines a standard set of wind farm attributes.
    Parameters
    ----------
    wind_turbine_fleet : :pandas:`pandas.DataFrame<frame>` or list(:class:`~windpowerlib.wind_turbine.WindTurbineGroup`)
        The wind turbine fleet specifies the turbine types in the wind farm and
        their corresponding number or total installed capacity. There are
        different options to provide the wind turbine fleet (see also examples
        below):
        * :pandas:`pandas.DataFrame<frame>` -
          DataFrame must have columns 'wind_turbine' containing a
          :class:`~.wind_turbine.WindTurbine` object and either
          'number_of_turbines' (number of wind turbines of the same turbine
          type in the wind farm, can be a float) or 'total_capacity'
          (installed capacity of wind turbines of the same turbine type in the
          wind farm).
        * list(:class:`~windpowerlib.wind_turbine.WindTurbineGroup`) -
          A :class:`~windpowerlib.wind_turbine.WindTurbineGroup` can be created
          from a :class:`~windpowerlib.wind_turbine.WindTurbine` using the
          :func:`~windpowerlib.wind_turbine.WindTurbine.to_group` method.
        * list(dict) -
          It is still possible to use a list of dictionaries (see example) but
          we recommend to use one of the other options above.
    efficiency : float or :pandas:`pandas.DataFrame<frame>` or None (optional)
        Efficiency of the wind farm. Provide as either constant (float) or
        power efficiency curve (pd.DataFrame) containing 'wind_speed' and
        'efficiency' columns with wind speeds in m/s and the corresponding
        dimensionless wind farm efficiency. Default: None.
    name : str (optional)
        Can be used as an identifier of the wind farm. Default: ''.
    Attributes
    ----------
    wind_turbine_fleet : :pandas:`pandas.DataFrame<frame>`
        Wind turbines of wind farm. DataFrame must have 'wind_turbine'
        (contains a :class:`~.wind_turbine.WindTurbine` object) and
        'number_of_turbines' (number of wind turbines of the same turbine type
        in the wind farm) as columns.
    efficiency : float or :pandas:`pandas.DataFrame<frame>` or None
        Efficiency of the wind farm. Either constant (float) power efficiency
        curve (pd.DataFrame) containing 'wind_speed' and 'efficiency'
        columns with wind speeds in m/s and the corresponding
        dimensionless wind farm efficiency. Default: None.
    name : str
        If set this is used as an identifier of the wind farm.
    hub_height : float
        The calculated mean hub height of the wind farm. See
        :py:func:`mean_hub_height` for more information.
    power_curve : :pandas:`pandas.DataFrame<frame>` or None
        The calculated power curve of the wind farm. See
        :py:func:`assign_power_curve` for more information.
    Examples
    --------
    >>> from windpowerlib import wind_farm
    >>> from windpowerlib import WindTurbine
    >>> import pandas as pd
    >>> enerconE126 = {
    ...    'hub_height': 135,
    ...    'rotor_diameter': 127,
    ...    'turbine_type': 'E-126/4200'}
    >>> e126 = WindTurbine(**enerconE126)
    >>> vestasV90 = {
    ...     'hub_height': 90,
    ...     'turbine_type': 'V90/2000',
    ...     'nominal_power': 2e6}
    >>> v90 = WindTurbine(**vestasV90)
    >>> # turbine fleet as DataFrame
    >>> wind_turbine_fleet = pd.DataFrame(
    ...     {'wind_turbine': [e126, v90],
    ...      'number_of_turbines': [6, None],
    ...      'total_capacity': [None, 3 * 2e6]})
    >>> example_farm = wind_farm.WindFarm(wind_turbine_fleet, name='my_farm')
    >>> print(example_farm.nominal_power)
    31200000.0
    >>> # turbine fleet as a list of WindTurbineGroup objects using the
    >>> # 'to_group' method.
    >>> wind_turbine_fleet = [e126.to_group(6),
    ...                       v90.to_group(total_capacity=3 * 2e6)]
    >>> example_farm = wind_farm.WindFarm(wind_turbine_fleet, name='my_farm')
    >>> print(example_farm.nominal_power)
    31200000.0
    >>> # turbine fleet as list of dictionaries (not recommended)
    >>> example_farm_data = {
    ...     'name': 'my_farm',
    ...     'wind_turbine_fleet': [{'wind_turbine': e126,
    ...                             'number_of_turbines': 6},
    ...                            {'wind_turbine': v90,
    ...                             'total_capacity': 3 * 2e6}]}
    >>> example_farm = wind_farm.WindFarm(**example_farm_data)
    >>> print(example_farm.nominal_power)
    31200000.0
    """
    def __init__(self, wind_turbine_fleet, efficiency=None, name="", **kwargs):
        """Store the farm attributes and validate/complete the turbine fleet."""
        self.wind_turbine_fleet = wind_turbine_fleet
        self.efficiency = efficiency
        self.name = name
        # hub_height and power_curve stay None until the respective
        # mean_hub_height() / assign_power_curve() methods are called.
        self.hub_height = None
        self._nominal_power = None
        self.power_curve = None
        self.check_and_complete_wind_turbine_fleet()
    def check_and_complete_wind_turbine_fleet(self):
        """
        Function to check wind turbine fleet user input.
        Besides checking if all necessary parameters to fully define the wind
        turbine fleet are provided, this function also fills in the
        number of turbines or total capacity of each turbine type and checks
        if they are consistent.
        """
        # convert list to dataframe if necessary
        if isinstance(self.wind_turbine_fleet, list):
            self.wind_turbine_fleet = pd.DataFrame(self.wind_turbine_fleet)
        # check wind turbines
        try:
            for turbine in self.wind_turbine_fleet["wind_turbine"]:
                if not isinstance(turbine, WindTurbine):
                    raise ValueError(
                        "Wind turbine must be provided as WindTurbine object "
                        "but was provided as {}.".format(type(turbine))
                    )
        except KeyError:
            raise KeyError(
                "Missing wind_turbine key/column in "
                "wind_turbine_fleet parameter."
            )
        # add columns for number of turbines and total capacity if they don't
        # yet exist
        if "number_of_turbines" not in self.wind_turbine_fleet.columns:
            self.wind_turbine_fleet["number_of_turbines"] = np.nan
        if "total_capacity" not in self.wind_turbine_fleet.columns:
            self.wind_turbine_fleet["total_capacity"] = np.nan
        # calculate number of turbines if necessary
        number_turbines_not_provided = self.wind_turbine_fleet[
            self.wind_turbine_fleet["number_of_turbines"].isnull()
        ]
        for ix, row in number_turbines_not_provided.iterrows():
            msg = (
                "Number of turbines of type {0} can not be deduced "
                "from total capacity. Please either provide "
                "`number_of_turbines` in the turbine fleet definition or "
                "set the nominal power of the wind turbine."
            )
            try:
                # derive the count from total_capacity / nominal_power;
                # nominal_power of None triggers the TypeError branch below
                number_of_turbines = (
                    row["total_capacity"] / row["wind_turbine"].nominal_power
                )
                if np.isnan(number_of_turbines):
                    raise ValueError(msg.format(row["wind_turbine"]))
                else:
                    self.wind_turbine_fleet.loc[
                        ix, "number_of_turbines"
                    ] = number_of_turbines
            except TypeError:
                raise ValueError(msg.format(row["wind_turbine"]))
        # calculate total capacity if necessary and check that total capacity
        # and number of turbines is consistent if both are provided
        for ix, row in self.wind_turbine_fleet.iterrows():
            if np.isnan(row["total_capacity"]):
                try:
                    self.wind_turbine_fleet.loc[ix, "total_capacity"] = (
                        row["number_of_turbines"]
                        * row["wind_turbine"].nominal_power
                    )
                except TypeError:
                    raise ValueError(
                        "Total capacity of turbines of type {turbine} cannot "
                        "be deduced. Please check if the nominal power of the "
                        "wind turbine is set.".format(
                            turbine=row["wind_turbine"]
                        )
                    )
            else:
                # tolerance of 1 W: if the provided total_capacity differs
                # from number_of_turbines * nominal_power by 1 W or more it
                # is overwritten with the computed value (with a warning)
                if (
                    not abs(
                        row["total_capacity"]
                        - (
                            row["number_of_turbines"]
                            * row["wind_turbine"].nominal_power
                        )
                    )
                    < 1
                ):
                    self.wind_turbine_fleet.loc[ix, "total_capacity"] = (
                        row["number_of_turbines"]
                        * row["wind_turbine"].nominal_power
                    )
                    msg = (
                        "The provided total capacity of WindTurbine {0} has "
                        "been overwritten as it was not consistent with the "
                        "number of turbines provided for this type."
                    )
                    warnings.warn(
                        msg.format(row["wind_turbine"]),
                        tools.WindpowerlibUserWarning,
                    )
    def __repr__(self):
        # prefer the user-given name; fall back to a fleet summary
        if self.name != "":
            return "Wind farm: {name}".format(name=self.name)
        else:
            return "Wind farm with turbine fleet: [number, type]\n {}".format(
                self.wind_turbine_fleet.loc[
                    :, ["number_of_turbines", "wind_turbine"]
                ].values
            )
    @property
    def nominal_power(self):
        r"""
        The nominal power is the sum of the nominal power of all turbines.
        Returns
        -------
        float
            Nominal power of the wind farm in W.
        """
        # lazily computed on first access from the fleet's total capacities
        # (note: a falsy cached value, e.g. 0, would be recomputed)
        if not self._nominal_power:
            self.nominal_power = self.wind_turbine_fleet.total_capacity.sum()
        return self._nominal_power
    @nominal_power.setter
    def nominal_power(self, nominal_power):
        self._nominal_power = nominal_power
    def mean_hub_height(self):
        r"""
        Calculates the mean hub height of the wind farm.
        The mean hub height of a wind farm is necessary for power output
        calculations with an aggregated wind farm power curve containing wind
        turbines with different hub heights. Hub heights of wind turbines with
        higher nominal power weigh more than others.
        After the calculations the mean hub height is assigned to the attribute
        :py:attr:`~hub_height`.
        Returns
        -------
        :class:`~.wind_farm.WindFarm`
            self
        Notes
        -----
        The following equation is used [1]_:
        .. math:: h_{WF} = e^{\sum\limits_{k}{ln(h_{WT,k})}
                           \frac{P_{N,k}}{\sum\limits_{k}{P_{N,k}}}}
        with:
            :math:`h_{WF}`: mean hub height of wind farm,
            :math:`h_{WT,k}`: hub height of the k-th wind turbine of a wind
            farm, :math:`P_{N,k}`: nominal power of the k-th wind turbine
        References
        ----------
        .. [1]  Knorr, K.: "Modellierung von raum-zeitlichen Eigenschaften der
                 Windenergieeinspeisung für wetterdatenbasierte
                 Windleistungssimulationen". Universität Kassel, Diss., 2016,
                 p. 35
        """
        # capacity-weighted geometric mean of the fleet's hub heights
        self.hub_height = np.exp(
            sum(
                np.log(row["wind_turbine"].hub_height) * row["total_capacity"]
                for ix, row in self.wind_turbine_fleet.iterrows()
            )
            / self.nominal_power
        )
        return self
    def assign_power_curve(
        self,
        wake_losses_model="wind_farm_efficiency",
        smoothing=False,
        block_width=0.5,
        standard_deviation_method="turbulence_intensity",
        smoothing_order="wind_farm_power_curves",
        turbulence_intensity=None,
        **kwargs,
    ):
        r"""
        Calculates the power curve of a wind farm.
        The wind farm power curve is calculated by aggregating the power curves
        of all wind turbines in the wind farm. Depending on the parameters the
        power curves are smoothed (before or after the aggregation) and/or a
        wind farm efficiency (power efficiency curve or constant efficiency) is
        applied after the aggregation.
        After the calculations the power curve is assigned to the attribute
        :py:attr:`~power_curve`.
        Parameters
        ----------
        wake_losses_model : str
            Defines the method for taking wake losses within the farm into
            consideration. Options: 'wind_farm_efficiency' or None.
            Default: 'wind_farm_efficiency'.
        smoothing : bool
            If True the power curves will be smoothed before or after the
            aggregation of power curves depending on `smoothing_order`.
            Default: False.
        block_width : float
            Width between the wind speeds in the sum of the equation in
            :py:func:`~.power_curves.smooth_power_curve`. Default: 0.5.
        standard_deviation_method : str
            Method for calculating the standard deviation for the Gauss
            distribution. Options: 'turbulence_intensity',
            'Staffell_Pfenninger'. Default: 'turbulence_intensity'.
        smoothing_order : str
            Defines when the smoothing takes place if `smoothing` is True.
            Options: 'turbine_power_curves' (to the single turbine power
            curves), 'wind_farm_power_curves'.
            Default: 'wind_farm_power_curves'.
        turbulence_intensity : float
            Turbulence intensity at hub height of the wind farm for power curve
            smoothing with 'turbulence_intensity' method. Can be calculated
            from `roughness_length` instead. Default: None.
        roughness_length : float (optional)
            Roughness length. If `standard_deviation_method` is
            'turbulence_intensity' and `turbulence_intensity` is not given
            the turbulence intensity is calculated via the roughness length.
        Returns
        -------
        :class:`~.wind_farm.WindFarm`
            self
        """
        # Check if all wind turbines have a power curve as attribute
        for turbine in self.wind_turbine_fleet["wind_turbine"]:
            if turbine.power_curve is None:
                raise ValueError(
                    "For an aggregated wind farm power curve "
                    + "each wind turbine needs a power curve "
                    + "but `power_curve` of '{}' is None.".format(turbine)
                )
        # Initialize data frame for power curve values
        df = pd.DataFrame()
        for ix, row in self.wind_turbine_fleet.iterrows():
            # Check if needed parameters are available and/or assign them
            if smoothing:
                # NOTE(review): once computed for the first turbine type,
                # turbulence_intensity is no longer None, so later fleet rows
                # reuse the first row's value even though their hub heights
                # may differ -- confirm this is intended.
                if (
                    standard_deviation_method == "turbulence_intensity"
                    and turbulence_intensity is None
                ):
                    if (
                        "roughness_length" in kwargs
                        and kwargs["roughness_length"] is not None
                    ):
                        # Calculate turbulence intensity and write to kwargs
                        turbulence_intensity = tools.estimate_turbulence_intensity(
                            row["wind_turbine"].hub_height,
                            kwargs["roughness_length"],
                        )
                        kwargs["turbulence_intensity"] = turbulence_intensity
                    else:
                        raise ValueError(
                            "`roughness_length` must be defined for using "
                            + "'turbulence_intensity' as "
                            + "`standard_deviation_method` if "
                            + "`turbulence_intensity` is not given"
                        )
            # Get original power curve
            power_curve = pd.DataFrame(row["wind_turbine"].power_curve)
            # Editions to the power curves before the summation
            if smoothing and smoothing_order == "turbine_power_curves":
                power_curve = power_curves.smooth_power_curve(
                    power_curve["wind_speed"],
                    power_curve["value"],
                    standard_deviation_method=standard_deviation_method,
                    block_width=block_width,
                    **kwargs,
                )
            else:
                # Add value zero to start and end of curve as otherwise
                # problems can occur during the aggregation
                if power_curve.iloc[0]["wind_speed"] != 0.0:
                    power_curve = pd.concat(
                        [
                            pd.DataFrame(
                                data={"value": [0.0], "wind_speed": [0.0]}
                            ),
                            power_curve,
                        ],
                        join="inner",
                    )
                if power_curve.iloc[-1]["value"] != 0.0:
                    # append a zero-power point 0.5 m/s past the last speed
                    power_curve = pd.concat(
                        [
                            power_curve,
                            pd.DataFrame(
                                data={
                                    "wind_speed": [
                                        power_curve["wind_speed"].loc[
                                            power_curve.index[-1]
                                        ]
                                        + 0.5
                                    ],
                                    "value": [0.0],
                                }
                            ),
                        ],
                        join="inner",
                    )
            # Add power curves of all turbine types to data frame
            # (multiplied by turbine amount)
            df = pd.concat(
                [
                    df,
                    pd.DataFrame(
                        power_curve.set_index(["wind_speed"])
                        * row["number_of_turbines"]
                    ),
                ],
                axis=1,
            )
        # Aggregate all power curves
        wind_farm_power_curve = pd.DataFrame(
            df.interpolate(method="index").sum(axis=1)
        )
        wind_farm_power_curve.columns = ["value"]
        wind_farm_power_curve.reset_index(inplace=True)
        # Apply power curve smoothing and consideration of wake losses
        # after the summation
        if smoothing and smoothing_order == "wind_farm_power_curves":
            wind_farm_power_curve = power_curves.smooth_power_curve(
                wind_farm_power_curve["wind_speed"],
                wind_farm_power_curve["value"],
                standard_deviation_method=standard_deviation_method,
                block_width=block_width,
                **kwargs,
            )
        if wake_losses_model == "wind_farm_efficiency":
            if self.efficiency is not None:
                wind_farm_power_curve = power_curves.wake_losses_to_power_curve(
                    wind_farm_power_curve["wind_speed"].values,
                    wind_farm_power_curve["value"].values,
                    wind_farm_efficiency=self.efficiency,
                )
            else:
                msg = (
                    "If you use `wake_losses_model` '{model}' your WindFarm "
                    "needs an efficiency but `efficiency` is {eff}. \n\n"
                    "Failing farm:\n {farm}"
                )
                raise ValueError(
                    msg.format(
                        model=wake_losses_model, farm=self, eff=self.efficiency
                    )
                )
        self.power_curve = wind_farm_power_curve
        return self
| {
"content_hash": "a7501c1a1113653d94a504b22dc1dd35",
"timestamp": "",
"source": "github",
"line_count": 483,
"max_line_length": 120,
"avg_line_length": 42.418219461697724,
"alnum_prop": 0.5343615775087857,
"repo_name": "wind-python/windpowerlib",
"id": "aa806aa19ea829f7d9faa9debbfb72e2190caa69",
"size": "20490",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "windpowerlib/wind_farm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "181328"
},
{
"name": "Python",
"bytes": "251227"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
# Basic app metadata read by the frappe installer / bench.
app_name = "frappe"
app_title = "Frappe Framework"
app_publisher = "Frappe Technologies Pvt. Ltd."
app_description = "Full Stack Web Application Framework in Python"
app_icon = "octicon octicon-circuit-board"
app_version = "5.0.34"
app_color = "orange"
app_email = "support@frappe.io"
# Install lifecycle hooks (dotted paths resolved and called by the installer).
before_install = "frappe.utils.install.before_install"
after_install = "frappe.utils.install.after_install"
# website
# Bundled JS/CSS injected into desk (backend) pages.
app_include_js = [
	"assets/js/desk.min.js",
	"assets/js/editor.min.js",
	"assets/js/list.min.js",
	"assets/js/form.min.js",
	"assets/js/report.min.js",
	"assets/js/module.min.js"
]
app_include_css = [
	"assets/css/desk.min.css",
	"assets/css/list.min.css",
	"assets/css/form.min.css",
	"assets/css/report.min.css",
	"assets/css/module.min.css"
]
# Assets injected into public website pages.
web_include_js = [
	"website_script.js"
]
bootstrap = "assets/frappe/css/bootstrap.css"
web_include_css = [
	"assets/css/frappe-web.css",
	"website_theme.css"
]
# URL rewrites for the website router.
website_route_rules = [
	{"from_route": "/blog", "to_route": "Blog Post"},
	{"from_route": "/blog/<category>", "to_route": "Blog Post"}
]
website_context = {
	"hero": {
		"blog": "templates/includes/blog/hero.html"
	}
}
write_file_keys = ["file_url", "file_name"]
notification_config = "frappe.core.notifications.get_notification_config"
before_tests = "frappe.utils.install.before_tests"
website_generators = ["Web Page", "Blog Post", "Blog Category", "Web Form"]
email_append_to = ["Event", "ToDo", "Communication"]
calendars = ["Event"]
# login
# Handlers called when a user session is created.
# NOTE(review): 'notifify_admin_access_to_system_manager' looks like a typo,
# but this string must match the actual function name in user.py -- verify
# against that module before changing it.
on_session_creation = [
	"frappe.desk.doctype.feed.feed.login_feed",
	"frappe.core.doctype.user.user.notifify_admin_access_to_system_manager"
]
# permissions
# Per-doctype hooks that restrict list queries and record-level access.
permission_query_conditions = {
	"Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
	"ToDo": "frappe.desk.doctype.todo.todo.get_permission_query_conditions",
	"User": "frappe.core.doctype.user.user.get_permission_query_conditions",
	"Feed": "frappe.desk.doctype.feed.feed.get_permission_query_conditions",
	"Note": "frappe.desk.doctype.note.note.get_permission_query_conditions"
}
has_permission = {
	"Event": "frappe.desk.doctype.event.event.has_permission",
	"ToDo": "frappe.desk.doctype.todo.todo.has_permission",
	"User": "frappe.core.doctype.user.user.has_permission",
	"Feed": "frappe.desk.doctype.feed.feed.has_permission",
	"Note": "frappe.desk.doctype.note.note.has_permission"
}
standard_queries = {
	"User": "frappe.core.doctype.user.user.user_query"
}
# Document lifecycle hooks; the "*" key applies to every doctype.
doc_events = {
	"*": {
		"after_insert": "frappe.email.doctype.email_alert.email_alert.trigger_email_alerts",
		"validate": "frappe.email.doctype.email_alert.email_alert.trigger_email_alerts",
		"on_update": [
			"frappe.desk.notifications.clear_doctype_notifications",
			"frappe.email.doctype.email_alert.email_alert.trigger_email_alerts",
			"frappe.desk.doctype.feed.feed.update_feed"
		],
		"after_rename": "frappe.desk.notifications.clear_doctype_notifications",
		"on_submit": [
			"frappe.email.doctype.email_alert.email_alert.trigger_email_alerts",
			"frappe.desk.doctype.feed.feed.update_feed"
		],
		"on_cancel": [
			"frappe.desk.notifications.clear_doctype_notifications",
			"frappe.email.doctype.email_alert.email_alert.trigger_email_alerts"
		],
		"on_trash": "frappe.desk.notifications.clear_doctype_notifications"
	}
}
# Background jobs grouped by scheduler frequency.
scheduler_events = {
	"all": [
		"frappe.email.bulk.flush",
		"frappe.email.doctype.email_account.email_account.pull",
		"frappe.email.doctype.email_account.email_account.notify_unreplied",
	],
	"daily": [
		"frappe.email.bulk.clear_outbox",
		"frappe.desk.notifications.clear_notifications",
		"frappe.core.doctype.scheduler_log.scheduler_log.set_old_logs_as_seen",
		"frappe.desk.doctype.event.event.send_event_digest",
		"frappe.sessions.clear_expired_sessions",
		"frappe.email.doctype.email_alert.email_alert.trigger_daily_alerts",
	]
}
default_background = "/assets/frappe/images/ui/into-the-dawn.jpg"
get_translated_dict = {
	("doctype", "System Settings"): "frappe.geo.country_info.get_translated_dict"
}
| {
"content_hash": "c6dbaab2eeb1194090f8df77eee759f9",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 86,
"avg_line_length": 29.77037037037037,
"alnum_prop": 0.7210748942523015,
"repo_name": "mbauskar/Das_frappe",
"id": "a3a207f39e94cc7402a96af98d7b4b2726e32e8d",
"size": "4019",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "frappe/hooks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "237130"
},
{
"name": "HTML",
"bytes": "130309"
},
{
"name": "JavaScript",
"bytes": "1330220"
},
{
"name": "Python",
"bytes": "1086358"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
} |
import json
from django.conf import settings
from raven.processors import Processor
class JSONProcessor(Processor):
    """
    Raven (Sentry client) processor that scrubs sensitive fields out of JSON
    request bodies. Raven's builtin sanitizers do not look inside JSON posted
    in the body, so the payload is decoded, passed through sanitise() and
    re-encoded here.
    """
    def process(self, data, **kwargs):
        http = data.get('sentry.interfaces.Http')
        if http:
            try:
                http['data'] = json.dumps(sanitise(json.loads(http['data'])))
            except (TypeError, ValueError):
                # Body was not valid JSON (or not a string at all); at this
                # point something already went horribly wrong upstream, so
                # leave the payload untouched.
                pass
        return data
def sanitise(data, keys=None):
    """Mask values of sensitive keys anywhere inside a nested structure.

    ``data`` is modified in place and also returned. Sensitive values are
    replaced with eight asterisks. ``keys`` defaults to
    ``settings.SENSITIVE_DATA_KEYS``. Non-dict input is returned unchanged.

    Fixes over the previous version:
    - ``dict.iteritems()`` (Python 2 only) replaced with ``dict.items()``,
      which works on both Python 2 and 3.
    - dicts nested inside lists are now scrubbed too; previously e.g.
      ``{'cards': [{'password': ...}]}`` leaked through untouched.
    """
    keys = keys or settings.SENSITIVE_DATA_KEYS

    def recurse(leaf):
        # Walk nested containers first, then mask the key itself (so a
        # sensitive key whose value is a dict still ends up masked).
        for k, v in leaf.items():
            if isinstance(v, dict):
                recurse(v)
            elif isinstance(v, list):
                for item in v:
                    if isinstance(item, dict):
                        recurse(item)
            if k in keys:
                leaf[k] = '*' * 8

    try:
        recurse(data)
    except AttributeError:
        # data has no .items() -- not a dict; hand it back untouched.
        return data
    return data
| {
"content_hash": "8e9e6a7a6a465f02839ca70664fd6b5e",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 79,
"avg_line_length": 27.348837209302324,
"alnum_prop": 0.6011904761904762,
"repo_name": "muffinresearch/solitude",
"id": "311174ebb360981220c4763b2094589ce7621aeb",
"size": "1176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solitude/processor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Puppet",
"bytes": "6677"
},
{
"name": "Python",
"bytes": "405779"
},
{
"name": "Shell",
"bytes": "3235"
}
],
"symlink_target": ""
} |
'''
Supported functions/arguments/defaults:
'''
# Defaults for connect(); user-supplied kwargs are merged over these
# (see merge_kwargs below).
connect_kwargs = {
    'device': 'localhost', # ip or hostname of TRex
    'trex_rpc_port': None, # TRex extention: RPC port of TRex server (for several TRexes under same OS)
    'trex_pub_port': None, # TRex extention: Publisher port of TRex server (for several TRexes under same OS)
    'port_list': None, # list of ports
    'username': 'TRexUser',
    'reset': True,
    'break_locks': False,
}
# Defaults for cleanup_session().
cleanup_session_kwargs = {
    'maintain_lock': False, # release ports at the end or not
    'port_list': None,
    'port_handle': None,
}
# Defaults for traffic_config(); names mirror HLTAPI stream-builder
# arguments, with TRex-specific extensions marked in the inline comments.
traffic_config_kwargs = {
    'mode': None, # ( create | modify | remove | reset )
    'split_by_cores': 'split', # ( split | duplicate | single ) TRex extention: split = split traffic by cores, duplicate = duplicate traffic for all cores, single = run only with sinle core (not implemented yet)
    'load_profile': None, # TRex extention: path to filename with stream profile (stream builder parameters will be ignored, limitation: modify)
    'consistent_random': False, # TRex extention: False (default) = random sequence will be different every run, True = random sequence will be same every run
    'ignore_macs': False, # TRex extention: True = use MACs from server configuration, no MAC VM (workaround on lack of ARP)
    'disable_flow_stats': False, # TRex extention: True = don't use flow stats for this stream, (workaround for limitation on type of packet for flow_stats)
    'flow_stats_id': None, # TRex extention: uint, for use of STLHltStream, specifies id for flow stats (see stateless manual for flow_stats details)
    'port_handle': None,
    'port_handle2': None,
    'bidirectional': False,
    # stream builder parameters
    'transmit_mode': 'continuous', # ( continuous | multi_burst | single_burst )
    'rate_pps': None,
    'rate_bps': None,
    'rate_percent': 10,
    'stream_id': None,
    'name': None,
    'direction': 0, # TRex extention: 1 = exchange sources and destinations, 0 = do nothing
    'pkts_per_burst': 1,
    'burst_loop_count': 1,
    'inter_burst_gap': 12,
    'length_mode': 'fixed', # ( auto | fixed | increment | decrement | random | imix )
    'l3_imix1_size': 64,
    'l3_imix1_ratio': 7,
    'l3_imix2_size': 570,
    'l3_imix2_ratio': 4,
    'l3_imix3_size': 1518,
    'l3_imix3_ratio': 1,
    'l3_imix4_size': 9230,
    'l3_imix4_ratio': 0,
    #L2
    'frame_size': 64,
    'frame_size_min': 64,
    'frame_size_max': 64,
    'frame_size_step': 1,
    'l2_encap': 'ethernet_ii', # ( ethernet_ii | ethernet_ii_vlan )
    'mac_src': '00:00:01:00:00:01',
    'mac_dst': '00:00:00:00:00:00',
    'mac_src2': '00:00:01:00:00:01',
    'mac_dst2': '00:00:00:00:00:00',
    'mac_src_mode': 'fixed', # ( fixed | increment | decrement | random )
    'mac_src_step': 1, # we are changing only 32 lowest bits
    'mac_src_count': 1,
    'mac_dst_mode': 'fixed', # ( fixed | increment | decrement | random )
    'mac_dst_step': 1, # we are changing only 32 lowest bits
    'mac_dst_count': 1,
    'mac_src2_mode': 'fixed', # ( fixed | increment | decrement | random )
    'mac_src2_step': 1,
    'mac_src2_count': 1,
    'mac_dst2_mode': 'fixed', # ( fixed | increment | decrement | random )
    'mac_dst2_step': 1,
    'mac_dst2_count': 1,
    # vlan options below can have multiple values for nested Dot1Q headers
    'vlan_user_priority': 1,
    'vlan_priority_mode': 'fixed', # ( fixed | increment | decrement | random )
    'vlan_priority_count': 1,
    'vlan_priority_step': 1,
    'vlan_id': 0,
    'vlan_id_mode': 'fixed', # ( fixed | increment | decrement | random )
    'vlan_id_count': 1,
    'vlan_id_step': 1,
    'vlan_cfi': 1,
    'vlan_protocol_tag_id': None,
    #L3, general
    'l3_protocol': None, # ( ipv4 | ipv6 )
    'l3_length_min': 110,
    'l3_length_max': 238,
    'l3_length_step': 1,
    #L3, IPv4
    'ip_precedence': 0,
    'ip_tos_field': 0,
    'ip_mbz': 0,
    'ip_delay': 0,
    'ip_throughput': 0,
    'ip_reliability': 0,
    'ip_cost': 0,
    'ip_reserved': 0,
    'ip_dscp': 0,
    'ip_cu': 0,
    'l3_length': None,
    'ip_id': 0,
    'ip_fragment_offset': 0,
    'ip_ttl': 64,
    'ip_checksum': None,
    'ip_src_addr': '0.0.0.0',
    'ip_dst_addr': '192.0.0.1',
    'ip_src_mode': 'fixed', # ( fixed | increment | decrement | random )
    'ip_src_step': 1, # ip or number
    'ip_src_count': 1,
    'ip_dst_mode': 'fixed', # ( fixed | increment | decrement | random )
    'ip_dst_step': 1, # ip or number
    'ip_dst_count': 1,
    #L3, IPv6
    'ipv6_traffic_class': 0,
    'ipv6_flow_label': 0,
    'ipv6_length': None,
    'ipv6_next_header': None,
    'ipv6_hop_limit': 64,
    'ipv6_src_addr': 'fe80:0:0:0:0:0:0:12',
    'ipv6_dst_addr': 'fe80:0:0:0:0:0:0:22',
    'ipv6_src_mode': 'fixed', # ( fixed | increment | decrement | random )
    'ipv6_src_step': 1, # we are changing only 32 lowest bits; can be ipv6 or number
    'ipv6_src_count': 1,
    'ipv6_dst_mode': 'fixed', # ( fixed | increment | decrement | random )
    'ipv6_dst_step': 1, # we are changing only 32 lowest bits; can be ipv6 or number
    'ipv6_dst_count': 1,
    #L4, TCP
    'l4_protocol': None, # ( tcp | udp )
    'tcp_src_port': 1024,
    'tcp_dst_port': 80,
    'tcp_seq_num': 1,
    'tcp_ack_num': 1,
    'tcp_data_offset': 5,
    'tcp_fin_flag': 0,
    'tcp_syn_flag': 0,
    'tcp_rst_flag': 0,
    'tcp_psh_flag': 0,
    'tcp_ack_flag': 0,
    'tcp_urg_flag': 0,
    'tcp_window': 4069,
    'tcp_checksum': None,
    'tcp_urgent_ptr': 0,
    'tcp_src_port_mode': 'increment', # ( increment | decrement | random )
    'tcp_src_port_step': 1,
    'tcp_src_port_count': 1,
    'tcp_dst_port_mode': 'increment', # ( increment | decrement | random )
    'tcp_dst_port_step': 1,
    'tcp_dst_port_count': 1,
    # L4, UDP
    'udp_src_port': 1024,
    'udp_dst_port': 80,
    'udp_length': None,
    'udp_dst_port_mode': 'increment', # ( increment | decrement | random )
    'udp_src_port_step': 1,
    'udp_src_port_count': 1,
    'udp_src_port_mode': 'increment', # ( increment | decrement | random )
    'udp_dst_port_step': 1,
    'udp_dst_port_count': 1,
}
# Defaults for traffic_control().
traffic_control_kwargs = {
    'action': None, # ( clear_stats | run | stop | sync_run | poll | reset )
    'port_handle': None,
}
# Defaults for traffic_stats().
traffic_stats_kwargs = {
    'mode': 'aggregate', # ( all | aggregate | streams )
    'port_handle': None,
}
import sys
import os
import socket
import copy
from collections import defaultdict
from .api import *
from .trex_stl_types import *
from .utils.common import get_number
class HLT_ERR(dict):
    """Failure result for HLT calls: {'status': 0, 'log': <message>, ...}.

    `log` may be a dict (merged as-is), a plain string (prefixed with
    '[ERR] ' unless already prefixed) or any other object (stored verbatim).
    Extra keyword arguments are merged in last.
    """
    def __init__(self, log = 'Unknown error', **kwargs):
        dict.__init__(self, {'status': 0})
        if type(log) is dict:
            dict.update(self, log)
        else:
            needs_prefix = type(log) is str and not log.startswith('[ERR]')
            self['log'] = ('[ERR] ' + log) if needs_prefix else log
        dict.update(self, kwargs)
dict.update(self, kwargs)
class HLT_OK(dict):
    """Success result for HLT calls: {'status': 1, 'log': None, ...}.

    `init_dict` entries and keyword arguments are merged over the defaults
    (keyword arguments win on key clashes).
    """
    def __init__(self, init_dict = None, **kwargs):
        # Default changed from a shared mutable `{}` (classic Python
        # mutable-default pitfall) to None; passing None explicitly is now
        # also accepted. Behavior for all existing callers is unchanged.
        dict.__init__(self, {'status': 1, 'log': None})
        if init_dict:
            dict.update(self, init_dict)
        dict.update(self, kwargs)
def merge_kwargs(default_kwargs, user_kwargs):
    """Return a deep copy of `default_kwargs` overlaid with `user_kwargs`.

    Keys unknown to the defaults are dropped with a printed warning, except
    for a few internal pass-through arguments.
    """
    merged = copy.deepcopy(default_kwargs)
    internal_keys = ('save_to_yaml', 'save_to_pcap', 'pg_id')  # internal arguments
    for key, value in user_kwargs.items():
        if key in merged or key in internal_keys:
            merged[key] = value
        else:
            print("Warning: provided parameter '%s' is not supported" % key)
    return merged
# change MACs from formats 01-23-45-67-89-10 or 0123.4567.8910 or {01 23 45 67 89 10} to Scapy format 01:23:45:67:89:10
def correct_macs(kwargs):
    """Normalize all MAC address arguments in `kwargs` (in place) to colon form.

    Step arguments are left untouched when given as numbers. Raises STLError
    for non-string addresses or values mac2str cannot parse.
    """
    list_of_mac_args = ['mac_src', 'mac_dst', 'mac_src2', 'mac_dst2']
    list_of_mac_steps = ['mac_src_step', 'mac_dst_step', 'mac_src2_step', 'mac_dst2_step']
    for mac_arg in list_of_mac_args + list_of_mac_steps:
        if mac_arg in kwargs:
            mac_value = kwargs[mac_arg]
            if is_integer(mac_value) and mac_arg in list_of_mac_steps: # step can be number
                continue
            if type(mac_value) is not str: raise STLError('Argument %s should be str' % mac_arg)
            # Strip curly braces and unify '-', ':' and '.' separators into spaces.
            mac_value = mac_value.replace('{', '').replace('}', '').strip().replace('-', ' ').replace(':', ' ').replace('.', ' ')
            # Cisco dotted format: after the replacements '0123.4567.8910' becomes
            # '0123 4567 8910' (spaces at offsets 4 and 9); regroup its 12 hex
            # digits into byte pairs before joining with colons.
            # NOTE(review): the [4]/[9] indexing assumes at least 10 characters
            # here; shorter garbage input would raise IndexError, not STLError.
            if mac_value[4] == ' ' and mac_value[9] == ' ':
                mac_value = ' '.join([mac_value[0:2], mac_value[2:7], mac_value[7:12], mac_value[12:14]])
            mac_value = ':'.join(mac_value.split())
            try:
                mac2str(mac_value) # verify we are ok
                kwargs[mac_arg] = mac_value
            except:
                raise STLError('Incorrect MAC %s=%s, please use 01:23:45:67:89:10 or 01-23-45-67-89-10 or 0123.4567.8910 or {01 23 45 67 89 10}' % (mac_arg, kwargs[mac_arg]))
def is_true(input):
    """Interpret common truthy spellings (bool / int / str) as True."""
    truthy_values = (True, 'True', 'true', 1, '1', 'enable', 'Enable', 'Yes', 'yes', 'y', 'Y', 'enabled', 'Enabled')
    return input in truthy_values
def error(err = None):
    """Print an error message (prefixed with '[ERR]') and exit with status 1.

    Raises a plain Exception when called without a message.
    """
    if not err:
        raise Exception('Unknown exception, look traceback')
    message = err
    if type(message) is str and not message.startswith('[ERR]'):
        message = '[ERR] ' + message
    print(message)
    sys.exit(1)
def check_res(res):
    """Pass through a successful HLT result dict; abort via error() on failure."""
    failed = res['status'] == 0
    if failed:
        error('Encountered error:\n%s' % res['log'])
    return res
def print_brief_stats(res):
    """Print a compact TX/RX total_pkts table, one column per port.

    Only integer keys of `res` are treated as ports; other keys (e.g.
    'status') are skipped.
    """
    header_line = ' ' * 3
    tx_line = 'TX:'
    rx_line = 'RX:'
    for port_id in res:
        if type(port_id) is not int:
            continue
        header_line += ' ' * 10 + 'Port%s' % port_id
        tx_line += '%15s' % res[port_id]['aggregate']['tx']['total_pkts']
        rx_line += '%15s' % res[port_id]['aggregate']['rx']['total_pkts']
    print(header_line)
    print(tx_line)
    print(rx_line)
def wait_with_progress(seconds):
    """Sleep for `seconds` seconds, printing a dot per second as progress."""
    for _ in range(seconds):
        time.sleep(1)
        sys.stdout.write('.')
        sys.stdout.flush()
    print('')
format_error = lambda e: e if isinstance(e, STLError) else traceback.format_exc()
# dict of streams per port
# hlt_history = False: holds list of stream_id per port
# hlt_history = True: act as dictionary (per port) stream_id -> hlt arguments used for build
class CStreamsPerPort(defaultdict):
    """Bookkeeping of HLT streams per port.

    Two modes:
      * hlt_history = False: maps port -> list of stream ids.
      * hlt_history = True:  maps port -> {stream_id: HLT build kwargs}, so
        streams can later be modified from their original HLT arguments.
    """
    def __init__(self, hlt_history = False):
        self.hlt_history = hlt_history
        if self.hlt_history:
            defaultdict.__init__(self, dict)
        else:
            defaultdict.__init__(self, list)

    def get_stream_list(self, ports_list = None):
        """Return stream ids per port; with history enabled, only the ids (keys)."""
        if self.hlt_history:
            if ports_list is None:
                ports_list = self.keys()
            elif not isinstance(ports_list, list):
                ports_list = [ports_list]
            ret = {}
            for port in ports_list:
                ret[port] = self[port].keys()
            return ret
        else:
            return self

    # add to stream_id list per port, no HLT args, res = HLT result
    def add_streams_from_res(self, res):
        if self.hlt_history: raise STLError('CStreamsPerPort: this object is not meant for HLT history, try init with hlt_history = False')
        if not isinstance(res, dict): raise STLError('CStreamsPerPort: res should be dict')
        if res.get('status') != 1: raise STLError('CStreamsPerPort: res has status %s' % res.get('status'))
        res_streams = res.get('stream_id')
        if not isinstance(res_streams, dict):
            raise STLError('CStreamsPerPort: stream_id in res should be dict')
        for port, port_stream_ids in res_streams.items():
            if type(port_stream_ids) is not list:
                port_stream_ids = [port_stream_ids]
            self[port].extend(port_stream_ids)

    # save HLT args to modify streams later
    def save_stream_args(self, ports_list, stream_id, stream_hlt_args):
        if stream_id is None: raise STLError('CStreamsPerPort: no stream_id in stream')
        if stream_hlt_args.get('load_profile'): return # can't modify profiles, don't save
        if not self.hlt_history: raise STLError('CStreamsPerPort: this object works only with HLT history, try init with hlt_history = True')
        if not is_integer(stream_id): raise STLError('CStreamsPerPort: stream_id should be number')
        if not isinstance(stream_hlt_args, dict): raise STLError('CStreamsPerPort: stream_hlt_args should be dict')
        if not isinstance(ports_list, list):
            ports_list = [ports_list]
        for port in ports_list:
            if stream_id not in self[port]:
                self[port][stream_id] = {}
            self[port][stream_id].update(stream_hlt_args)

    def remove_stream(self, ports_list, stream_id):
        """Forget `stream_id` on every port in `ports_list`.

        Bug fixes vs. the previous version: the old code (a) rejected every
        call with a check copy-pasted from save_stream_args (it required
        `stream_id` to be a dict and complained about 'stream_hlt_args'),
        (b) swapped port and stream_id in the not-found error message, and
        (c) used list.pop(stream_id) -- pop-by-index -- instead of removing
        the stream id value from the list.
        """
        if not isinstance(ports_list, list):
            ports_list = [ports_list]
        for port in ports_list:
            if port not in self:
                raise STLError('CStreamsPerPort: port %s not defined' % port)
            if stream_id not in self[port]:
                raise STLError('CStreamsPerPort: stream_id %s not found at port %s' % (stream_id, port))
            if self.hlt_history:
                del self[port][stream_id]
            else:
                self[port].remove(stream_id)
class CTRexHltApi(object):
    """HLT-style (Cisco HLTAPI-like) facade over the TRex stateless client.

    Every public method returns an HLT result dict built by HLT_OK / HLT_ERR
    instead of raising, mirroring the HLTAPI convention.  Streams are tracked
    by pg_id, which doubles as the HLT "stream handle".
    """
    def __init__(self, verbose = 0):
        # STLClient instance; created by connect(), cleared on failure/cleanup
        self.trex_client = None
        # verbosity level forwarded to STLClient
        self.verbose = verbose
        self._last_pg_id = 0 # pg_id acts as stream_handle
        self._streams_history = {} # streams in format of HLT arguments for modify later
        self._native_handle_by_pg_id = {} # pg_id -> native handle + port
        self._pg_id_by_id = {} # stream_id -> pg_id
        self._pg_id_by_name = {} # name -> pg_id

    ###########################
    #    Session functions    #
    ###########################

    def connect(self, **user_kwargs):
        """Create an STLClient, connect and acquire the requested ports.

        Returns HLT_OK(port_handle = {port: port}) on success, HLT_ERR on any
        failure (DNS, client init, connect, acquire, optional reset).
        """
        kwargs = merge_kwargs(connect_kwargs, user_kwargs)
        device = kwargs['device']
        try:
            device = socket.gethostbyname(device) # work with ip
        except: # give it another try
            try:
                device = socket.gethostbyname(device)
            except Exception as e:
                return HLT_ERR('Could not translate hostname "%s" to IP: %s' % (device, e))
        try:
            # optional overrides of the default ZMQ ports of the server
            zmq_ports = {}
            if kwargs['trex_rpc_port']:
                zmq_ports['sync_port'] = kwargs['trex_rpc_port']
            if kwargs['trex_pub_port']:
                zmq_ports['async_port'] = kwargs['trex_pub_port']
            self.trex_client = STLClient(kwargs['username'], device, verbose_level = self.verbose, **zmq_ports)
        except Exception as e:
            return HLT_ERR('Could not init stateless client %s: %s' % (device, format_error(e)))
        try:
            self.trex_client.connect()
        except Exception as e:
            self.trex_client = None
            return HLT_ERR('Could not connect to device %s: %s' % (device, format_error(e)))
        # connection successfully created with server, try acquiring ports of TRex
        try:
            port_list = self._parse_port_list(kwargs['port_list'])
            self.trex_client.acquire(ports = port_list, force = kwargs['break_locks'])
            for port in port_list:
                self._native_handle_by_pg_id[port] = {}
        except Exception as e:
            self.trex_client = None
            return HLT_ERR('Could not acquire ports %s: %s' % (port_list, format_error(e)))
        # arrived here, all desired ports were successfully acquired
        if kwargs['reset']:
            # remove all port traffic configuration from TRex
            try:
                self.trex_client.stop(ports = port_list)
                self.trex_client.reset(ports = port_list)
            except Exception as e:
                self.trex_client = None
                return HLT_ERR('Error in reset traffic: %s' % format_error(e))
        self._streams_history = CStreamsPerPort(hlt_history = True)
        return HLT_OK(port_handle = dict([(port_id, port_id) for port_id in port_list]))

    def cleanup_session(self, **user_kwargs):
        """Stop traffic, remove streams, release ports and disconnect.

        With maintain_lock set, only disconnects (ports stay acquired).
        """
        kwargs = merge_kwargs(cleanup_session_kwargs, user_kwargs)
        if not kwargs['maintain_lock']:
            # release taken ports
            port_list = kwargs['port_list'] or kwargs['port_handle'] or 'all'
            try:
                if port_list == 'all':
                    port_list = self.trex_client.get_acquired_ports()
                else:
                    port_list = self._parse_port_list(port_list)
            except Exception as e:
                return HLT_ERR('Unable to determine which ports to release: %s' % format_error(e))
            try:
                self.trex_client.stop(port_list)
            except Exception as e:
                return HLT_ERR('Unable to stop traffic %s: %s' % (port_list, format_error(e)))
            try:
                self.trex_client.remove_all_streams(port_list)
            except Exception as e:
                return HLT_ERR('Unable to remove all streams %s: %s' % (port_list, format_error(e)))
            try:
                self.trex_client.release(port_list)
            except Exception as e:
                return HLT_ERR('Unable to release ports %s: %s' % (port_list, format_error(e)))
        try:
            self.trex_client.disconnect(stop_traffic = False, release_ports = False)
        except Exception as e:
            return HLT_ERR('Error disconnecting: %s' % e)
        self.trex_client = None
        return HLT_OK()

    def interface_config(self, port_handle, mode='config'):
        """HLT interface_config stub; validates *mode* but is not implemented yet."""
        if not self.trex_client:
            return HLT_ERR('Connect first')
        ALLOWED_MODES = ['config', 'modify', 'destroy']
        if mode not in ALLOWED_MODES:
            return HLT_ERR('Mode must be one of the following values: %s' % ALLOWED_MODES)
        # pass this function for now...
        return HLT_ERR('interface_config not implemented yet')

    ###########################
    #    Traffic functions    #
    ###########################

    def traffic_config(self, **user_kwargs):
        """HLT traffic_config: create/modify/remove/reset streams on ports.

        'modify' is implemented as remove + re-create with the merged kwargs
        saved in self._streams_history.  'bidirectional' fans out into two
        recursive calls with opposite directions.
        Returns HLT_OK(stream_id = pg_id) for 'create'.
        """
        if not self.trex_client:
            return HLT_ERR('Connect first')
        try:
            correct_macs(user_kwargs)
        except Exception as e:
            return HLT_ERR(format_error(e))
        kwargs = merge_kwargs(traffic_config_kwargs, user_kwargs)
        stream_id = kwargs['stream_id']
        mode = kwargs['mode']
        pg_id = kwargs['flow_stats_id']
        port_handle = port_list = self._parse_port_list(kwargs['port_handle'])
        ALLOWED_MODES = ['create', 'modify', 'remove', 'enable', 'disable', 'reset']
        if mode not in ALLOWED_MODES:
            return HLT_ERR('Mode must be one of the following values: %s' % ALLOWED_MODES)
        if mode == 'reset':
            # drop every stream and its saved history on the given ports
            try:
                self.trex_client.remove_all_streams(port_handle)
                for port in port_handle:
                    if port in self._streams_history:
                        del self._streams_history[port]
                return HLT_OK()
            except Exception as e:
                return HLT_ERR('Could not reset streams at ports %s: %s' % (port_handle, format_error(e)))
        if mode == 'remove':
            if stream_id is None:
                return HLT_ERR('Please specify stream_id to remove.')
            if stream_id == 'all':
                try:
                    self.trex_client.remove_all_streams(port_handle)
                    for port in port_handle:
                        if port in self._streams_history:
                            del self._streams_history[port]
                except Exception as e:
                    return HLT_ERR('Could not remove all streams at ports %s: %s' % (port_handle, format_error(e)))
            else:
                try:
                    self._remove_stream(stream_id, port_handle)
                except Exception as e:
                    return HLT_ERR('Could not remove streams with specified by %s, error: %s' % (stream_id, format_error(e)))
            return HLT_OK()
        #if mode == 'enable':
        #    stream_id = kwargs.get('stream_id')
        #    if stream_id is None:
        #        return HLT_ERR('Please specify stream_id to enable.')
        #    if stream_id not in self._streams_history:
        #        return HLT_ERR('This stream_id (%s) was not used before, please create new.' % stream_id)
        #    self._streams_history[stream_id].update(kwargs) # <- the modification
        if mode == 'modify': # we remove stream and create new one with same stream_id
            pg_id = kwargs.get('stream_id')
            if pg_id is None:
                return HLT_ERR('Please specify stream_id to modify.')
            if len(port_handle) > 1:
                # multi-port modify: recurse one port at a time
                for port in port_handle:
                    try:
                        user_kwargs['port_handle'] = port
                        res = self.traffic_config(**user_kwargs)
                        if res['status'] == 0:
                            return HLT_ERR('Error during modify of stream: %s' % res['log'])
                    except Exception as e:
                        return HLT_ERR('Could not remove stream(s) %s from port(s) %s: %s' % (stream_id, port_handle, format_error(e)))
                return HLT_OK()
            else:
                if type(port_handle) is list:
                    port = port_handle[0]
                else:
                    port = port_handle
                if port not in self._streams_history:
                    return HLT_ERR('Port %s was not used/acquired' % port)
                if pg_id not in self._streams_history[port]:
                    return HLT_ERR('This stream_id (%s) was not used before at port %s, please create new.' % (stream_id, port))
                # merge saved creation kwargs with the new overrides
                new_kwargs = {}
                new_kwargs.update(self._streams_history[port][pg_id])
                new_kwargs.update(user_kwargs)
                user_kwargs = new_kwargs
            # only reached for the single-port case (multi-port path returned above)
            try:
                self._remove_stream(pg_id, [port])
            except Exception as e:
                return HLT_ERR('Could not remove stream(s) %s from port(s) %s: %s' % (stream_id, port_handle, format_error(e)))
        if mode == 'create' or mode == 'modify':
            # create a new stream with desired attributes, starting by creating packet
            if is_true(kwargs['bidirectional']): # two streams with opposite directions
                del user_kwargs['bidirectional']
                stream_per_port = {}
                save_to_yaml = user_kwargs.get('save_to_yaml')
                bidirect_err = 'When using bidirectional flag, '
                if len(port_handle) != 1:
                    return HLT_ERR(bidirect_err + 'port_handle should be single port handle.')
                port_handle = port_handle[0]
                port_handle2 = kwargs['port_handle2']
                if (type(port_handle2) is list and len(port_handle2) > 1) or port_handle2 is None:
                    return HLT_ERR(bidirect_err + 'port_handle2 should be single port handle.')
                try:
                    if save_to_yaml and type(save_to_yaml) is str:
                        user_kwargs['save_to_yaml'] = save_to_yaml.replace('.yaml', '_bi1.yaml')
                    res1 = self.traffic_config(**user_kwargs)
                    if res1['status'] == 0:
                        raise STLError('Could not create bidirectional stream 1: %s' % res1['log'])
                    stream_per_port[port_handle] = res1['stream_id']
                    kwargs['direction'] = 1 - kwargs['direction'] # not
                    correct_direction(user_kwargs, kwargs)
                    if save_to_yaml and type(save_to_yaml) is str:
                        user_kwargs['save_to_yaml'] = save_to_yaml.replace('.yaml', '_bi2.yaml')
                    user_kwargs['port_handle'] = port_handle2
                    res2 = self.traffic_config(**user_kwargs)
                    if res2['status'] == 0:
                        raise STLError('Could not create bidirectional stream 2: %s' % res2['log'])
                    stream_per_port[port_handle2] = res2['stream_id']
                except Exception as e:
                    return HLT_ERR('Could not generate bidirectional traffic: %s' % format_error(e))
                if mode == 'create':
                    return HLT_OK(stream_id = stream_per_port)
                else:
                    return HLT_OK()
            try:
                if not pg_id:
                    pg_id = self._get_available_pg_id()
                if kwargs['load_profile']:
                    stream_obj = STLProfile.load_py(kwargs['load_profile'], direction = kwargs['direction'])
                else:
                    user_kwargs['pg_id'] = pg_id
                    stream_obj = STLHltStream(**user_kwargs)
            except Exception as e:
                return HLT_ERR('Could not create stream: %s' % format_error(e))
            # try adding the stream per ports
            try:
                for port in port_handle:
                    stream_id_arr = self.trex_client.add_streams(streams = stream_obj,
                                                                 ports = port)
                    self._streams_history.save_stream_args(port, pg_id, user_kwargs)
                    if type(stream_id_arr) is not list:
                        stream_id_arr = [stream_id_arr]
                    self._native_handle_by_pg_id[port][pg_id] = stream_id_arr
            except Exception as e:
                return HLT_ERR('Could not add stream to ports: %s' % format_error(e))
            if mode == 'create':
                return HLT_OK(stream_id = pg_id)
            else:
                return HLT_OK()
        return HLT_ERR('Got to the end of traffic_config, mode not implemented or forgot "return" somewhere.')

    def traffic_control(self, **user_kwargs):
        """HLT traffic_control: run/stop/sync_run/reset/clear_stats/poll.

        Always finishes by polling whether traffic is active and returns
        HLT_OK(stopped = not active).
        """
        if not self.trex_client:
            return HLT_ERR('Connect first')
        kwargs = merge_kwargs(traffic_control_kwargs, user_kwargs)
        action = kwargs['action']
        port_handle = kwargs['port_handle']
        ALLOWED_ACTIONS = ['clear_stats', 'run', 'stop', 'sync_run', 'poll', 'reset']
        if action not in ALLOWED_ACTIONS:
            return HLT_ERR('Action must be one of the following values: {actions}'.format(actions=ALLOWED_ACTIONS))
        if action == 'run':
            try:
                self.trex_client.start(ports = port_handle)
            except Exception as e:
                return HLT_ERR('Could not start traffic: %s' % format_error(e))
        elif action == 'sync_run': # (clear_stats + run)
            try:
                self.trex_client.clear_stats(ports = port_handle)
                self.trex_client.start(ports = port_handle)
            except Exception as e:
                return HLT_ERR('Unable to do sync_run: %s' % format_error(e))
        elif action == 'stop':
            try:
                self.trex_client.stop(ports = port_handle)
            except Exception as e:
                return HLT_ERR('Could not stop traffic: %s' % format_error(e))
        elif action == 'reset':
            try:
                self.trex_client.reset(ports = port_handle)
                for port in port_handle:
                    if port in self._streams_history:
                        del self._streams_history[port]
            except Exception as e:
                return HLT_ERR('Could not reset traffic: %s' % format_error(e))
        elif action == 'clear_stats':
            try:
                self.trex_client.clear_stats(ports = port_handle)
            except Exception as e:
                return HLT_ERR('Could not clear stats: %s' % format_error(e))
        elif action != 'poll': # at poll just return 'stopped' status
            return HLT_ERR("Action '%s' is not supported yet on TRex" % action)
        try:
            is_traffic_active = self.trex_client.is_traffic_active(ports = port_handle)
        except Exception as e:
            return HLT_ERR('Unable to determine ports status: %s' % format_error(e))
        return HLT_OK(stopped = not is_traffic_active)

    def traffic_stats(self, **user_kwargs):
        """HLT traffic_stats: build per-port 'aggregate' and per-pg_id 'stream' stats.

        line_rate_percentage is computed against each port's L1 speed
        (get_speed_bps); guarded against zero speed.
        """
        if not self.trex_client:
            return HLT_ERR('Connect first')
        kwargs = merge_kwargs(traffic_stats_kwargs, user_kwargs)
        mode = kwargs['mode']
        port_handle = kwargs['port_handle']
        if type(port_handle) is not list:
            port_handle = [port_handle]
        ALLOWED_MODES = ['aggregate', 'streams', 'all']
        if mode not in ALLOWED_MODES:
            return HLT_ERR("'mode' must be one of the following values: %s" % ALLOWED_MODES)
        hlt_stats_dict = dict([(port, {}) for port in port_handle])
        ports_speed = {}
        for port_id in port_handle:
            ports_speed[port_id] = self.trex_client.ports[port_id].get_speed_bps()
        try:
            stats = self.trex_client.get_stats(port_handle)
            if mode in ('all', 'aggregate'):
                for port_id in port_handle:
                    port_stats = stats[port_id]
                    if is_integer(port_id):
                        hlt_stats_dict[port_id]['aggregate'] = {
                            'tx': {
                                'pkt_bit_rate': port_stats.get('tx_bps', 0),
                                'pkt_byte_count': port_stats.get('obytes', 0),
                                'pkt_count': port_stats.get('opackets', 0),
                                'pkt_rate': port_stats.get('tx_pps', 0),
                                'total_pkt_bytes': port_stats.get('obytes', 0),
                                'total_pkt_rate': port_stats.get('tx_pps', 0),
                                'total_pkts': port_stats.get('opackets', 0),
                                },
                            'rx': {
                                'pkt_bit_rate': port_stats.get('rx_bps', 0),
                                'pkt_byte_count': port_stats.get('ibytes', 0),
                                'pkt_count': port_stats.get('ipackets', 0),
                                'pkt_rate': port_stats.get('rx_pps', 0),
                                'total_pkt_bytes': port_stats.get('ibytes', 0),
                                'total_pkt_rate': port_stats.get('rx_pps', 0),
                                'total_pkts': port_stats.get('ipackets', 0),
                                }
                            }
            if mode in ('all', 'streams'):
                for pg_id, pg_stats in stats['flow_stats'].items():
                    try:
                        # flow_stats keys may arrive as strings; skip non-numeric ones
                        pg_id = int(pg_id)
                    except:
                        continue
                    for port_id in port_handle:
                        if 'stream' not in hlt_stats_dict[port_id]:
                            hlt_stats_dict[port_id]['stream'] = {}
                        hlt_stats_dict[port_id]['stream'][pg_id] = {
                            'tx': {
                                'total_pkts': pg_stats['tx_pkts'].get(port_id, 0),
                                'total_pkt_bytes': pg_stats['tx_bytes'].get(port_id, 0),
                                'total_pkts_bytes': pg_stats['tx_bytes'].get(port_id, 0),
                                'total_pkt_bit_rate': pg_stats['tx_bps'].get(port_id, 0),
                                'total_pkt_rate': pg_stats['tx_pps'].get(port_id, 0),
                                'line_rate_percentage': pg_stats['tx_bps_l1'].get(port_id, 0) * 100.0 / ports_speed[port_id] if ports_speed[port_id] else 0,
                                },
                            'rx': {
                                'total_pkts': pg_stats['rx_pkts'].get(port_id, 0),
                                'total_pkt_bytes': pg_stats['rx_bytes'].get(port_id, 0),
                                'total_pkts_bytes': pg_stats['rx_bytes'].get(port_id, 0),
                                'total_pkt_bit_rate': pg_stats['rx_bps'].get(port_id, 0),
                                'total_pkt_rate': pg_stats['rx_pps'].get(port_id, 0),
                                'line_rate_percentage': pg_stats['rx_bps_l1'].get(port_id, 0) * 100.0 / ports_speed[port_id] if ports_speed[port_id] else 0,
                                },
                            }
        except Exception as e:
            return HLT_ERR('Could not retrieve stats: %s' % format_error(e))
        return HLT_OK(hlt_stats_dict)

    # timeout = maximal time to wait
    def wait_on_traffic(self, port_handle = None, timeout = None):
        """Block until traffic on the ports finishes (or *timeout* expires).

        NOTE(review): returns None on success (not HLT_OK), unlike the other
        methods — callers should not rely on a result dict here; confirm intent.
        """
        try:
            self.trex_client.wait_on_traffic(port_handle, timeout)
        except Exception as e:
            return HLT_ERR('Unable to run wait_on_traffic: %s' % format_error(e))

    ###########################
    #    Private functions    #
    ###########################

    def _get_available_pg_id(self):
        """Return the next pg_id in [1, 65535] not present in server flow stats.

        Scans forward from the last handed-out id, wrapping at 65535; raises
        STLError if all 65535 ids are in use.
        """
        pg_id = self._last_pg_id
        used_pg_ids = self.trex_client.get_stats()['flow_stats'].keys()
        for i in range(65535):
            pg_id += 1
            if pg_id not in used_pg_ids:
                self._last_pg_id = pg_id
                return pg_id
            if pg_id == 65535:
                pg_id = 0
        raise STLError('Could not find free pg_id in range [1, 65535].')

    # remove streams from given port(s).
    # stream_id can be:
    #    * int - exact stream_id value
    #    * list - list of stream_id values or strings (see below)
    #    * string - exact stream_id value, mix of ranges/list separated by comma: 2, 4-13
    def _remove_stream(self, stream_id, port_handle):
        """Remove stream(s) by pg_id from the given ports (see formats above).

        Recurses for list/comma/range forms; raises STLError on bad input.
        """
        stream_num = get_number(stream_id)
        if stream_num is not None: # exact value of int or str
            for port in port_handle:
                native_handles = self._native_handle_by_pg_id[port][stream_num]
                self.trex_client.remove_streams(native_handles, port) # actual remove
                del self._native_handle_by_pg_id[port][stream_num]
                del self._streams_history[port][stream_num]
            return
        if type(stream_id) is list: # list of values/strings
            for each_stream_id in stream_id:
                self._remove_stream(each_stream_id, port_handle) # recurse
            return
        if type(stream_id) is str: # range or list in string
            if ',' in stream_id:
                for each_stream_id_element in stream_id.split(','):
                    self._remove_stream(each_stream_id_element, port_handle) # recurse
                return
            if '-' in stream_id:
                stream_id_min, stream_id_max = stream_id.split('-', 1)
                stream_id_min = get_number(stream_id_min)
                stream_id_max = get_number(stream_id_max)
                if stream_id_min is None:
                    raise STLError('_remove_stream: wrong range param %s' % stream_id_min)
                if stream_id_max is None:
                    raise STLError('_remove_stream: wrong range param %s' % stream_id_max)
                if stream_id_max < stream_id_min:
                    raise STLError('_remove_stream: right range param is smaller than left one: %s-%s' % (stream_id_min, stream_id_max))
                for each_stream_id in xrange(stream_id_min, stream_id_max + 1):
                    self._remove_stream(each_stream_id, port_handle) # recurse
                return
        raise STLError('_remove_stream: wrong stream_id param %s' % stream_id)

    @staticmethod
    def _parse_port_list(port_list):
        """Normalize a ports spec (space-separated string, list, or single number)
        into a list of ints; raises STLError otherwise."""
        if type(port_list) is str:
            return [int(port) for port in port_list.strip().split()]
        elif type(port_list) is list:
            return [int(port) for port in port_list]
        elif is_integer(port_list):
            return [int(port_list)]
        raise STLError('port_list should be string with ports, list, or single number')
def STLHltStream(**user_kwargs):
    """Build a native STLStream from HLT-style kwargs.

    For length_mode 'imix' the call fans out into one fixed-size stream per
    non-zero imix ratio and returns the *list* of those streams; otherwise a
    single STLStream is returned.  Raises STLError on invalid arguments.
    """
    kwargs = merge_kwargs(traffic_config_kwargs, user_kwargs)
    # verify rate is given by at most one arg
    rate_args = set(['rate_pps', 'rate_bps', 'rate_percent'])
    intersect_rate_args = list(rate_args & set(user_kwargs.keys()))
    if len(intersect_rate_args) > 1:
        raise STLError('More than one rate argument specified: %s' % intersect_rate_args)
    try:
        rate_key = intersect_rate_args[0]
    except IndexError:
        rate_key = 'rate_percent'
    # BUGFIX: original compared with "is" ('rate_key is 'rate_percent''), which
    # only worked through CPython string interning; use equality.
    if rate_key == 'rate_percent' and float(kwargs['rate_percent']) > 100:
        raise STLError('rate_percent should not exceed 100%')

    if kwargs['length_mode'] == 'imix': # several streams with given length
        streams_arr = []
        user_kwargs['length_mode'] = 'fixed'
        # collect the four imix ratio/size pairs once instead of four copy-pasted blocks
        ratios = [kwargs['l3_imix%s_ratio' % i] for i in (1, 2, 3, 4)]
        sizes = [kwargs['l3_imix%s_size' % i] for i in (1, 2, 3, 4)]
        if min(sizes) < 32:
            raise STLError('l3_imix*_size should be at least 32')
        save_to_yaml = kwargs.get('save_to_yaml')
        total_rate = float(kwargs[rate_key])
        if rate_key == 'rate_pps': # ratio in packets as is
            weights = list(ratios)
        elif rate_key == 'rate_bps': # ratio dependent on L2 size too
            weights = [ratio * size for ratio, size in zip(ratios, sizes)]
        elif rate_key == 'rate_percent': # ratio dependent on L1 size too (L2 + 20 bytes overhead)
            weights = [ratio * (size + 20) for ratio, size in zip(ratios, sizes)]
        total_weight = float(sum(weights))
        if total_weight == 0:
            raise STLError('Used length_mode imix, but all the ratios are 0')
        for i, (ratio, size, weight) in enumerate(zip(ratios, sizes, weights), start = 1):
            if ratio > 0:
                if save_to_yaml and type(save_to_yaml) is str:
                    user_kwargs['save_to_yaml'] = save_to_yaml.replace('.yaml', '_imix%s.yaml' % i)
                user_kwargs['frame_size'] = size
                # each sub-stream gets its weighted share of the total rate
                user_kwargs[rate_key] = total_rate * weight / total_weight
                streams_arr.append(STLHltStream(**user_kwargs))
        return streams_arr

    # packet generation
    packet = generate_packet(**user_kwargs)

    # stream generation
    try:
        rate_types_dict = {'rate_pps': 'pps', 'rate_bps': 'bps_L2', 'rate_percent': 'percentage'}
        rate_stateless = {rate_types_dict[rate_key]: float(kwargs[rate_key])}
        transmit_mode = kwargs['transmit_mode']
        pkts_per_burst = kwargs['pkts_per_burst']
        if transmit_mode == 'continuous':
            transmit_mode_obj = STLTXCont(**rate_stateless)
        elif transmit_mode == 'single_burst':
            transmit_mode_obj = STLTXSingleBurst(total_pkts = pkts_per_burst, **rate_stateless)
        elif transmit_mode == 'multi_burst':
            transmit_mode_obj = STLTXMultiBurst(pkts_per_burst = pkts_per_burst, count = int(kwargs['burst_loop_count']),
                                                ibg = kwargs['inter_burst_gap'], **rate_stateless)
        else:
            # BUGFIX: the error message was never %-formatted with the actual mode
            raise STLError('transmit_mode %s not supported/implemented' % transmit_mode)
    except Exception as e:
        # kwargs.get() here: the 'transmit_mode' local may be unbound if the
        # kwargs lookup itself raised, which would mask the real error
        raise STLError('Could not create transmit_mode object %s: %s' % (kwargs.get('transmit_mode'), format_error(e)))

    try:
        # flow stats (pg_id) only attached for IP traffic and when not disabled
        if kwargs['l3_protocol'] in ('ipv4', 'ipv6') and not kwargs['disable_flow_stats']:
            pg_id = kwargs.get('pg_id', kwargs.get('flow_stats_id'))
        else:
            pg_id = None
        stream = STLStream(packet = packet,
                           random_seed = 1 if is_true(kwargs['consistent_random']) else 0,
                           #enabled = True,
                           #self_start = True,
                           flow_stats = STLFlowStats(pg_id) if pg_id else None,
                           mode = transmit_mode_obj,
                           )
    except Exception as e:
        raise STLError('Could not create stream: %s' % format_error(e))

    debug_filename = kwargs.get('save_to_yaml')
    if type(debug_filename) is str:
        print('saving to %s' % debug_filename)
        stream.dump_to_yaml(debug_filename)
    return stream
packet_cache = LRU_cache(maxlen = 20)
def generate_packet(**user_kwargs):
correct_macs(user_kwargs)
if repr(user_kwargs) in packet_cache:
return packet_cache[repr(user_kwargs)]
kwargs = merge_kwargs(traffic_config_kwargs, user_kwargs)
correct_sizes(kwargs) # we are producing the packet - 4 bytes fcs
correct_direction(kwargs, kwargs)
vm_cmds = []
vm_variables_cache = {} # we will keep in cache same variables (inc/dec, var size in bytes, number of steps, step)
fix_ipv4_checksum = False
### L2 ###
if kwargs['l2_encap'] in ('ethernet_ii', 'ethernet_ii_vlan'):
#fields_desc = [ MACField("dst","00:00:00:01:00:00"),
# MACField("src","00:00:00:02:00:00"),
# XShortEnumField("type", 0x9000, ETHER_TYPES) ]
if kwargs['ignore_macs']: # workaround for lack of ARP
kwargs['mac_src'] = None
kwargs['mac_dst'] = None
kwargs['mac_src_mode'] = 'fixed'
kwargs['mac_dst_mode'] = 'fixed'
ethernet_kwargs = {}
if kwargs['mac_src']:
ethernet_kwargs['src'] = kwargs['mac_src']
if kwargs['mac_dst']:
ethernet_kwargs['dst'] = kwargs['mac_dst']
l2_layer = Ether(**ethernet_kwargs)
# Eth VM, change only 32 lsb
if kwargs['mac_src_mode'] != 'fixed':
count = int(kwargs['mac_src_count']) - 1
if count < 0:
raise STLError('mac_src_count has to be at least 1')
if count > 0 or kwargs['mac_src_mode'] == 'random':
mac_src = ipv4_str_to_num(mac2str(kwargs['mac_src'])[2:]) # take only 32 lsb
step = kwargs['mac_src_step']
if type(step) is str:
step = ipv4_str_to_num(mac2str(step)[2:]) # take only 32 lsb
if step < 1:
raise STLError('mac_src_step has to be at least 1')
if kwargs['mac_src_mode'] == 'increment':
add_val = mac_src - 0x7fffffff
var_name = '%s_%s_%s_%s' % ('inc', 4, count, step)
if var_name not in vm_variables_cache:
vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'inc', step = step,
min_value = 0x7fffffff,
max_value = 0x7fffffff + count * step))
vm_variables_cache[var_name] = True
elif kwargs['mac_src_mode'] == 'decrement':
add_val = mac_src - 0x7fffffff
var_name = '%s_%s_%s_%s' % ('dec', 4, count, step)
if var_name not in vm_variables_cache:
vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'dec', step = step,
min_value = 0x7fffffff - count * step,
max_value = 0x7fffffff))
vm_variables_cache[var_name] = True
elif kwargs['mac_src_mode'] == 'random':
add_val = 0
var_name = 'mac_src_random'
vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'random', max_value = 0xffffffff))
else:
raise STLError('mac_src_mode %s is not supported' % kwargs['mac_src_mode'])
vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'Ethernet.src', offset_fixup = 2, add_val = add_val))
if kwargs['mac_dst_mode'] != 'fixed':
count = int(kwargs['mac_dst_count']) - 1
if count < 0:
raise STLError('mac_dst_count has to be at least 1')
if count > 0 or kwargs['mac_dst_mode'] == 'random':
mac_dst = ipv4_str_to_num(mac2str(kwargs['mac_dst'])[2:]) # take only 32 lsb
step = kwargs['mac_dst_step']
if type(step) is str:
step = ipv4_str_to_num(mac2str(step)[2:]) # take only 32 lsb
if step < 1:
raise STLError('mac_dst_step has to be at least 1')
if kwargs['mac_dst_mode'] == 'increment':
add_val = mac_dst - 0x7fffffff
var_name = '%s_%s_%s_%s' % ('inc', 4, count, step)
if var_name not in vm_variables_cache:
vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'inc', step = step,
min_value = 0x7fffffff,
max_value = 0x7fffffff + count * step))
vm_variables_cache[var_name] = True
elif kwargs['mac_dst_mode'] == 'decrement':
add_val = mac_dst - 0x7fffffff
var_name = '%s_%s_%s_%s' % ('dec', 4, count, step)
if var_name not in vm_variables_cache:
vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'dec', step = step,
min_value = 0x7fffffff - count * step,
max_value = 0x7fffffff))
vm_variables_cache[var_name] = True
elif kwargs['mac_dst_mode'] == 'random':
add_val = 0
var_name = 'mac_dst_random'
vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'random', max_value = 0xffffffff))
else:
raise STLError('mac_dst_mode %s is not supported' % kwargs['mac_dst_mode'])
vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'Ethernet.dst', offset_fixup = 2, add_val = add_val))
if kwargs['l2_encap'] == 'ethernet_ii_vlan' or (kwargs['l2_encap'] == 'ethernet_ii' and vlan_in_args(user_kwargs)):
#fields_desc = [ BitField("prio", 0, 3),
# BitField("id", 0, 1),
# BitField("vlan", 1, 12),
# XShortEnumField("type", 0x0000, ETHER_TYPES) ]
for i, vlan_kwargs in enumerate(split_vlan_args(kwargs)):
vlan_id = int(vlan_kwargs['vlan_id'])
dot1q_kwargs = {'prio': int(vlan_kwargs['vlan_user_priority']),
'vlan': vlan_id,
'id': int(vlan_kwargs['vlan_cfi'])}
vlan_protocol_tag_id = vlan_kwargs['vlan_protocol_tag_id']
if vlan_protocol_tag_id is not None:
try: # to convert str to int
vlan_protocol_tag_id = int(vlan_protocol_tag_id, 16)
except TypeError:
pass
ALLOWED_VLAN_PROTO = [0x8100, 0x88A8, 0x9100, 0x9200, 0x9300]
if vlan_protocol_tag_id not in ALLOWED_VLAN_PROTO:
raise STLError('vlan_protocol_tag_id argument(s) must be one of the following values: %s, got: 0x%04x' %
(', '.join(map('0x{:04x}'.format, ALLOWED_VLAN_PROTO)), vlan_protocol_tag_id))
l2_layer.lastlayer().type = vlan_protocol_tag_id
l2_layer /= Dot1Q(**dot1q_kwargs)
# vlan VM
vlan_id_mode = vlan_kwargs['vlan_id_mode']
if vlan_id_mode != 'fixed':
count = int(vlan_kwargs['vlan_id_count']) - 1
if count < 0:
raise STLError('vlan_id_count has to be at least 1')
if count > 0 or vlan_id_mode == 'random':
var_name = 'vlan_id%s' % i
step = int(vlan_kwargs['vlan_id_step'])
if step < 1:
raise STLError('vlan_id_step has to be at least 1')
if vlan_id_mode == 'increment':
add_val = vlan_id - 0x7fff
var_name = '%s_%s_%s_%s' % ('dec', 2, count, step)
if var_name not in vm_variables_cache:
vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'inc', step = step,
min_value = 0x7fff,
max_value = 0x7fff + count * step))
vm_variables_cache[var_name] = True
elif vlan_id_mode == 'decrement':
add_val = vlan_id - 0x7fff
var_name = '%s_%s_%s_%s' % ('dec', 2, count, step)
if var_name not in vm_variables_cache:
vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'dec', step = step,
min_value = 0x7fff - count * step,
max_value = 0x7fff))
vm_variables_cache[var_name] = True
elif vlan_id_mode == 'random':
add_val = 0
var_name = 'vlan_id_random'
vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'random', max_value = 0xffff))
else:
raise STLError('vlan_id_mode %s is not supported' % vlan_id_mode)
vm_cmds.append(STLVmWrMaskFlowVar(fv_name = var_name, pkt_offset = '802|1Q:%s.vlan' % i,
pkt_cast_size = 2, mask = 0xfff, add_value = add_val))
else:
raise NotImplementedError("l2_encap does not support the desired encapsulation '%s'" % kwargs['l2_encap'])
base_pkt = l2_layer
### L3 ###
if kwargs['l3_protocol'] is None:
l3_layer = None
elif kwargs['l3_protocol'] == 'ipv4':
#fields_desc = [ BitField("version" , 4 , 4),
# BitField("ihl", None, 4),
# XByteField("tos", 0),
# ShortField("len", None),
# ShortField("id", 1),
# FlagsField("flags", 0, 3, ["MF","DF","evil"]),
# BitField("frag", 0, 13),
# ByteField("ttl", 64),
# ByteEnumField("proto", 0, IP_PROTOS),
# XShortField("chksum", None),
# Emph(IPField("src", "16.0.0.1")),
# Emph(IPField("dst", "48.0.0.1")),
# PacketListField("options", [], IPOption, length_from=lambda p:p.ihl*4-20) ]
ip_tos = get_TOS(user_kwargs, kwargs)
if ip_tos < 0 or ip_tos > 255:
raise STLError('TOS %s is not in range 0-255' % ip_tos)
l3_layer = IP(tos = ip_tos,
#len = kwargs['l3_length'], don't let user create corrupt packets
id = kwargs['ip_id'],
frag = kwargs['ip_fragment_offset'],
ttl = kwargs['ip_ttl'],
chksum = kwargs['ip_checksum'],
src = kwargs['ip_src_addr'],
dst = kwargs['ip_dst_addr'],
)
# IPv4 VM
if kwargs['ip_src_mode'] != 'fixed':
count = int(kwargs['ip_src_count']) - 1
if count < 0:
raise STLError('ip_src_count has to be at least 1')
if count > 0 or kwargs['ip_src_mode'] == 'random':
fix_ipv4_checksum = True
ip_src_addr = kwargs['ip_src_addr']
if type(ip_src_addr) is str:
ip_src_addr = ipv4_str_to_num(is_valid_ipv4_ret(ip_src_addr))
step = kwargs['ip_src_step']
if type(step) is str:
step = ipv4_str_to_num(is_valid_ipv4_ret(step))
if step < 1:
raise STLError('ip_src_step has to be at least 1')
if kwargs['ip_src_mode'] == 'increment':
add_val = ip_src_addr - 0x7fffffff
var_name = '%s_%s_%s_%s' % ('inc', 4, count, step)
if var_name not in vm_variables_cache:
vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'inc', step = step,
min_value = 0x7fffffff,
max_value = 0x7fffffff + count * step))
vm_variables_cache[var_name] = True
elif kwargs['ip_src_mode'] == 'decrement':
add_val = ip_src_addr - 0x7fffffff
var_name = '%s_%s_%s_%s' % ('dec', 4, count, step)
if var_name not in vm_variables_cache:
vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'dec', step = step,
min_value = 0x7fffffff - count * step,
max_value = 0x7fffffff))
vm_variables_cache[var_name] = True
elif kwargs['ip_src_mode'] == 'random':
add_val = 0
var_name = 'ip_src_random'
vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'random', max_value = 0xffffffff))
else:
raise STLError('ip_src_mode %s is not supported' % kwargs['ip_src_mode'])
vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'IP.src', add_val = add_val))
if kwargs['ip_dst_mode'] != 'fixed':
count = int(kwargs['ip_dst_count']) - 1
if count < 0:
raise STLError('ip_dst_count has to be at least 1')
if count > 0 or kwargs['ip_dst_mode'] == 'random':
fix_ipv4_checksum = True
ip_dst_addr = kwargs['ip_dst_addr']
if type(ip_dst_addr) is str:
ip_dst_addr = ipv4_str_to_num(is_valid_ipv4_ret(ip_dst_addr))
step = kwargs['ip_dst_step']
if type(step) is str:
step = ipv4_str_to_num(is_valid_ipv4_ret(step))
if step < 1:
raise STLError('ip_dst_step has to be at least 1')
if kwargs['ip_dst_mode'] == 'increment':
add_val = ip_dst_addr - 0x7fffffff
var_name = '%s_%s_%s_%s' % ('inc', 4, count, step)
if var_name not in vm_variables_cache:
vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'inc', step = step,
min_value = 0x7fffffff,
max_value = 0x7fffffff + count * step))
vm_variables_cache[var_name] = True
elif kwargs['ip_dst_mode'] == 'decrement':
add_val = ip_dst_addr - 0x7fffffff
var_name = '%s_%s_%s_%s' % ('dec', 4, count, step)
if var_name not in vm_variables_cache:
vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'dec', step = step,
min_value = 0x7fffffff - count * step,
max_value = 0x7fffffff))
vm_variables_cache[var_name] = True
elif kwargs['ip_dst_mode'] == 'random':
add_val = 0
var_name = 'ip_dst_random'
vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'random', max_value = 0xffffffff))
else:
raise STLError('ip_dst_mode %s is not supported' % kwargs['ip_dst_mode'])
vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'IP.dst', add_val = add_val))
elif kwargs['l3_protocol'] == 'ipv6':
#fields_desc = [ BitField("version" , 6 , 4),
# BitField("tc", 0, 8), #TODO: IPv6, ByteField ?
# BitField("fl", 0, 20),
# ShortField("plen", None),
# ByteEnumField("nh", 59, ipv6nh),
# ByteField("hlim", 64),
# IP6Field("dst", "::2"),
# #SourceIP6Field("src", "dst"), # dst is for src @ selection
# IP6Field("src", "::1") ]
ipv6_kwargs = {'tc': kwargs['ipv6_traffic_class'],
'fl': kwargs['ipv6_flow_label'],
'plen': kwargs['ipv6_length'],
'hlim': kwargs['ipv6_hop_limit'],
'src': kwargs['ipv6_src_addr'],
'dst': kwargs['ipv6_dst_addr']}
if kwargs['ipv6_next_header'] is not None:
ipv6_kwargs['nh'] = kwargs['ipv6_next_header']
l3_layer = IPv6(**ipv6_kwargs)
# IPv6 VM, change only 32 lsb
if kwargs['ipv6_src_mode'] != 'fixed':
count = int(kwargs['ipv6_src_count']) - 1
if count < 0:
raise STLError('ipv6_src_count has to be at least 1')
if count > 0 or kwargs['ipv6_src_mode'] == 'random':
ipv6_src_addr_num = ipv4_str_to_num(is_valid_ipv6_ret(kwargs['ipv6_src_addr'])[-4:])
step = kwargs['ipv6_src_step']
if type(step) is str: # convert ipv6 step to number
step = ipv4_str_to_num(is_valid_ipv6_ret(step)[-4:])
if step < 1:
raise STLError('ipv6_src_step has to be at least 1')
if kwargs['ipv6_src_mode'] == 'increment':
add_val = ipv6_src_addr_num - 0x7fffffff
var_name = '%s_%s_%s_%s' % ('inc', 4, count, step)
if var_name not in vm_variables_cache:
vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'inc', step = step,
min_value = 0x7fffffff,
max_value = 0x7fffffff + count * step))
vm_variables_cache[var_name] = True
elif kwargs['ipv6_src_mode'] == 'decrement':
add_val = ipv6_src_addr_num - 0x7fffffff
var_name = '%s_%s_%s_%s' % ('dec', 4, count, step)
if var_name not in vm_variables_cache:
vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'dec', step = step,
min_value = 0x7fffffff - count * step,
max_value = 0x7fffffff))
vm_variables_cache[var_name] = True
elif kwargs['ipv6_src_mode'] == 'random':
add_val = 0
var_name = 'ipv6_src_random'
vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'random', max_value = 0xffffffff))
else:
raise STLError('ipv6_src_mode %s is not supported' % kwargs['ipv6_src_mode'])
vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'IPv6.src', offset_fixup = 12, add_val = add_val))
if kwargs['ipv6_dst_mode'] != 'fixed':
count = int(kwargs['ipv6_dst_count']) - 1
if count < 0:
raise STLError('ipv6_dst_count has to be at least 1')
if count > 0 or kwargs['ipv6_dst_mode'] == 'random':
ipv6_dst_addr_num = ipv4_str_to_num(is_valid_ipv6_ret(kwargs['ipv6_dst_addr'])[-4:])
step = kwargs['ipv6_dst_step']
if type(step) is str: # convert ipv6 step to number
step = ipv4_str_to_num(is_valid_ipv6_ret(step)[-4:])
if step < 1:
raise STLError('ipv6_dst_step has to be at least 1')
if kwargs['ipv6_dst_mode'] == 'increment':
add_val = ipv6_dst_addr_num - 0x7fffffff
var_name = '%s_%s_%s_%s' % ('inc', 4, count, step)
if var_name not in vm_variables_cache:
vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'inc', step = step,
min_value = 0x7fffffff,
max_value = 0x7fffffff + count * step))
vm_variables_cache[var_name] = True
elif kwargs['ipv6_dst_mode'] == 'decrement':
add_val = ipv6_dst_addr_num - 0x7fffffff
var_name = '%s_%s_%s_%s' % ('dec', 4, count, step)
if var_name not in vm_variables_cache:
vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'dec', step = step,
min_value = 0x7fffffff - count * step,
max_value = 0x7fffffff))
vm_variables_cache[var_name] = True
elif kwargs['ipv6_dst_mode'] == 'random':
add_val = 0
var_name = 'ipv6_dst_random'
vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'random', max_value = 0xffffffff))
else:
raise STLError('ipv6_dst_mode %s is not supported' % kwargs['ipv6_dst_mode'])
vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'IPv6.dst', offset_fixup = 12, add_val = add_val))
elif kwargs['l3_protocol'] is not None:
raise NotImplementedError("l3_protocol '%s' is not supported by TRex yet." % kwargs['l3_protocol'])
if l3_layer is not None:
base_pkt /= l3_layer
### L4 ###
l4_layer = None
if kwargs['l4_protocol'] == 'tcp':
assert kwargs['l3_protocol'] in ('ipv4', 'ipv6'), 'TCP must be over ipv4/ipv6'
#fields_desc = [ ShortEnumField("sport", 20, TCP_SERVICES),
# ShortEnumField("dport", 80, TCP_SERVICES),
# IntField("seq", 0),
# IntField("ack", 0),
# BitField("dataofs", None, 4),
# BitField("reserved", 0, 4),
# FlagsField("flags", 0x2, 8, "FSRPAUEC"),
# ShortField("window", 8192),
# XShortField("chksum", None),
# ShortField("urgptr", 0),
# TCPOptionsField("options", {}) ]
tcp_flags = ('F' if kwargs['tcp_fin_flag'] else '' +
'S' if kwargs['tcp_syn_flag'] else '' +
'R' if kwargs['tcp_rst_flag'] else '' +
'P' if kwargs['tcp_psh_flag'] else '' +
'A' if kwargs['tcp_ack_flag'] else '' +
'U' if kwargs['tcp_urg_flag'] else '')
l4_layer = TCP(sport = kwargs['tcp_src_port'],
dport = kwargs['tcp_dst_port'],
seq = kwargs['tcp_seq_num'],
ack = kwargs['tcp_ack_num'],
dataofs = kwargs['tcp_data_offset'],
flags = tcp_flags,
window = kwargs['tcp_window'],
chksum = kwargs['tcp_checksum'],
urgptr = kwargs['tcp_urgent_ptr'],
)
# TCP VM
if kwargs['tcp_src_port_mode'] != 'fixed':
count = int(kwargs['tcp_src_port_count']) - 1
if count < 0:
raise STLError('tcp_src_port_count has to be at least 1')
if count > 0 or kwargs['tcp_src_port_mode'] == 'random':
fix_ipv4_checksum = True
step = kwargs['tcp_src_port_step']
if step < 1:
raise STLError('tcp_src_port_step has to be at least 1')
if kwargs['tcp_src_port_mode'] == 'increment':
add_val = kwargs['tcp_src_port'] - 0x7fff
var_name = '%s_%s_%s_%s' % ('inc', 2, count, step)
if var_name not in vm_variables_cache:
vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'inc', step = step,
min_value = 0x7fff,
max_value = 0x7fff + count * step))
vm_variables_cache[var_name] = True
elif kwargs['tcp_src_port_mode'] == 'decrement':
add_val = kwargs['tcp_src_port'] - 0x7fff
var_name = '%s_%s_%s_%s' % ('dec', 2, count, step)
if var_name not in vm_variables_cache:
vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'dec', step = step,
min_value = 0x7fff - count * step,
max_value = 0x7fff))
vm_variables_cache[var_name] = True
elif kwargs['tcp_src_port_mode'] == 'random':
add_val = 0
var_name = 'tcp_src_random'
vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'random', max_value = 0xffff))
else:
raise STLError('tcp_src_port_mode %s is not supported' % kwargs['tcp_src_port_mode'])
vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'TCP.sport', add_val = add_val))
if kwargs['tcp_dst_port_mode'] != 'fixed':
count = int(kwargs['tcp_dst_port_count']) - 1
if count < 0:
raise STLError('tcp_dst_port_count has to be at least 1')
if count > 0 or kwargs['tcp_dst_port_mode'] == 'random':
fix_ipv4_checksum = True
step = kwargs['tcp_dst_port_step']
if step < 1:
raise STLError('tcp_dst_port_step has to be at least 1')
if kwargs['tcp_dst_port_mode'] == 'increment':
add_val = kwargs['tcp_dst_port'] - 0x7fff
var_name = '%s_%s_%s_%s' % ('inc', 2, count, step)
if var_name not in vm_variables_cache:
vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'inc', step = step,
min_value = 0x7fff,
max_value = 0x7fff + count * step))
vm_variables_cache[var_name] = True
elif kwargs['tcp_dst_port_mode'] == 'decrement':
add_val = kwargs['tcp_dst_port'] - 0x7fff
var_name = '%s_%s_%s_%s' % ('dec', 2, count, step)
if var_name not in vm_variables_cache:
vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'dec', step = step,
min_value = 0x7fff - count * step,
max_value = 0x7fff))
vm_variables_cache[var_name] = True
elif kwargs['tcp_dst_port_mode'] == 'random':
add_val = 0
var_name = 'tcp_dst_random'
vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'random', max_value = 0xffff))
else:
raise STLError('tcp_dst_port_mode %s is not supported' % kwargs['tcp_dst_port_mode'])
vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'TCP.dport', add_val = add_val))
elif kwargs['l4_protocol'] == 'udp':
assert kwargs['l3_protocol'] in ('ipv4', 'ipv6'), 'UDP must be over ipv4/ipv6'
#fields_desc = [ ShortEnumField("sport", 53, UDP_SERVICES),
# ShortEnumField("dport", 53, UDP_SERVICES),
# ShortField("len", None),
# XShortField("chksum", None), ]
l4_layer = UDP(sport = kwargs['udp_src_port'],
dport = kwargs['udp_dst_port'],
len = kwargs['udp_length'], chksum = None)
# UDP VM
if kwargs['udp_src_port_mode'] != 'fixed':
count = int(kwargs['udp_src_port_count']) - 1
if count < 0:
raise STLError('udp_src_port_count has to be at least 1')
if count > 0 or kwargs['udp_src_port_mode'] == 'random':
fix_ipv4_checksum = True
step = kwargs['udp_src_port_step']
if step < 1:
raise STLError('udp_src_port_step has to be at least 1')
if kwargs['udp_src_port_mode'] == 'increment':
add_val = kwargs['udp_src_port'] - 0x7fff
var_name = '%s_%s_%s_%s' % ('inc', 2, count, step)
if var_name not in vm_variables_cache:
vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'inc', step = step,
min_value = 0x7fff,
max_value = 0x7fff + count * step))
vm_variables_cache[var_name] = True
elif kwargs['udp_src_port_mode'] == 'decrement':
add_val = kwargs['udp_src_port'] - 0x7fff
var_name = '%s_%s_%s_%s' % ('dec', 2, count, step)
if var_name not in vm_variables_cache:
vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'dec', step = step,
min_value = 0x7fff - count * step,
max_value = 0x7fff))
vm_variables_cache[var_name] = True
elif kwargs['udp_src_port_mode'] == 'random':
add_val = 0
var_name = 'udp_src_random'
vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'random', max_value = 0xffff))
else:
raise STLError('udp_src_port_mode %s is not supported' % kwargs['udp_src_port_mode'])
vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'UDP.sport', add_val = add_val))
if kwargs['udp_dst_port_mode'] != 'fixed':
count = int(kwargs['udp_dst_port_count']) - 1
if count < 0:
raise STLError('udp_dst_port_count has to be at least 1')
if count > 0 or kwargs['udp_dst_port_mode'] == 'random':
fix_ipv4_checksum = True
step = kwargs['udp_dst_port_step']
if step < 1:
raise STLError('udp_dst_port_step has to be at least 1')
if kwargs['udp_dst_port_mode'] == 'increment':
add_val = kwargs['udp_dst_port'] - 0x7fff
var_name = '%s_%s_%s_%s' % ('inc', 2, count, step)
if var_name not in vm_variables_cache:
vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'inc', step = step,
min_value = 0x7fff,
max_value = 0x7fff + count * step))
elif kwargs['udp_dst_port_mode'] == 'decrement':
add_val = kwargs['udp_dst_port'] - 0x7fff
var_name = '%s_%s_%s_%s' % ('dec', 2, count, step)
if var_name not in vm_variables_cache:
vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'dec', step = step,
min_value = 0x7fff - count * step,
max_value = 0x7fff))
elif kwargs['udp_dst_port_mode'] == 'random':
add_val = 0
var_name = 'udp_dst_random'
vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'random', max_value = 0xffff))
else:
raise STLError('udp_dst_port_mode %s is not supported' % kwargs['udp_dst_port_mode'])
vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'UDP.dport', add_val = add_val))
elif kwargs['l4_protocol'] is not None:
raise NotImplementedError("l4_protocol '%s' is not supported by TRex yet." % kwargs['l4_protocol'])
if l4_layer is not None:
base_pkt /= l4_layer
trim_dict = {'increment': 'inc', 'decrement': 'dec', 'random': 'random'}
length_mode = kwargs['length_mode']
if length_mode == 'auto':
payload_len = 0
elif length_mode == 'fixed':
if 'frame_size' in user_kwargs: # L2 has higher priority over L3
payload_len = kwargs['frame_size'] - len(base_pkt)
elif 'l3_length' in user_kwargs:
payload_len = kwargs['l3_length'] - (len(base_pkt) - len(l2_layer))
else: # default
payload_len = kwargs['frame_size'] - len(base_pkt)
elif length_mode == 'imix':
raise STLError("length_mode 'imix' should be treated at stream creating level.")
elif length_mode in trim_dict:
if 'frame_size_min' in user_kwargs or 'frame_size_max' in user_kwargs: # size is determined by L2, higher priority over L3 size
if kwargs['frame_size_min'] < 44 or kwargs['frame_size_max'] < 44:
raise STLError('frame_size_min and frame_size_max should be at least 44')
if kwargs['frame_size_min'] > kwargs['frame_size_max']:
raise STLError('frame_size_min is bigger than frame_size_max')
if kwargs['frame_size_min'] != kwargs['frame_size_max']:
fix_ipv4_checksum = True
vm_cmds.append(STLVmFlowVar(name = 'pkt_len', size = 2, op = trim_dict[length_mode], step = kwargs['frame_size_step'],
min_value = kwargs['frame_size_min'],
max_value = kwargs['frame_size_max']))
vm_cmds.append(STLVmTrimPktSize('pkt_len'))
payload_len = kwargs['frame_size_max'] - len(base_pkt)
else: # size is determined by L3
if kwargs['l3_length_min'] < 40 or kwargs['l3_length_max'] < 40:
raise STLError('l3_length_min and l3_length_max should be at least 40')
if kwargs['l3_length_min'] > kwargs['l3_length_max']:
raise STLError('l3_length_min is bigger than l3_length_max')
if kwargs['l3_length_min'] != kwargs['l3_length_max']:
fix_ipv4_checksum = True
vm_cmds.append(STLVmFlowVar(name = 'pkt_len', size = 2, op = trim_dict[length_mode], step = kwargs['l3_length_step'],
min_value = kwargs['l3_length_min'] + len(l2_layer),
max_value = kwargs['l3_length_max'] + len(l2_layer)))
payload_len = kwargs['l3_length_max'] + len(l2_layer) - len(base_pkt)
vm_cmds.append(STLVmTrimPktSize('pkt_len'))
if (l3_layer and l3_layer.name == 'IP'):
vm_cmds.append(STLVmWrFlowVar(fv_name = 'pkt_len', pkt_offset = 'IP.len', add_val = -len(l2_layer)))
if (l4_layer and l4_layer.name == 'UDP'):
vm_cmds.append(STLVmWrFlowVar(fv_name = 'pkt_len', pkt_offset = 'UDP.len', add_val = -len(l2_layer) - len(l3_layer)))
else:
raise STLError('length_mode should be one of the following: %s' % ['auto', 'fixed'] + trim_dict.keys())
if payload_len < 0:
raise STLError('Packet length is bigger than defined by frame_size* or l3_length*. We got payload size %s' % payload_len)
base_pkt /= '!' * payload_len
pkt = STLPktBuilder()
pkt.set_packet(base_pkt)
if fix_ipv4_checksum and l3_layer.name == 'IP' and kwargs['ip_checksum'] is None:
vm_cmds.append(STLVmFixIpv4(offset = 'IP'))
if vm_cmds:
if kwargs['split_by_cores'] == 'split':
max_length = 0
for cmd in vm_cmds:
if isinstance(cmd, STLVmFlowVar):
if cmd.op not in ('inc', 'dec'):
continue
length = float(cmd.max_value - cmd.min_value) / cmd.step
if cmd.name == 'ip_src' and length > 7: # priority is to split by ip_src
break
if length > max_length:
max_length = length
elif kwargs['split_by_cores'] == 'single':
raise STLError("split_by_cores 'single' not implemented yet")
elif kwargs['split_by_cores'] != 'duplicate':
raise STLError("split_by_cores '%s' is not supported" % kwargs['split_by_cores'])
pkt.add_command(STLScVmRaw(vm_cmds))
# debug (only the base packet, without VM)
debug_filename = kwargs.get('save_to_pcap')
if type(debug_filename) is str:
pkt.dump_pkt_to_pcap(debug_filename)
packet_cache[repr(user_kwargs)] = pkt
return pkt
def get_TOS(user_kwargs, kwargs):
    """Build the IPv4 TOS byte from one of three mutually exclusive HLT argument styles.

    The three styles are:
      * TOS0: raw fields   (ip_precedence, ip_tos_field, ip_mbz)
      * TOS1: RFC 791 bits (ip_precedence, ip_delay, ip_throughput, ip_reliability, ip_cost, ip_reserved)
      * TOS2: DSCP/CU      (ip_dscp, ip_cu)

    :param user_kwargs: arguments explicitly passed by the user (decides which style wins)
    :param kwargs: full argument dict with defaults filled in (provides the values)
    :return: the TOS byte as an int
    :raises STLError: if arguments from conflicting styles are mixed
    """
    TOS0 = {'ip_precedence', 'ip_tos_field', 'ip_mbz'}
    TOS1 = {'ip_precedence', 'ip_delay', 'ip_throughput', 'ip_reliability', 'ip_cost', 'ip_reserved'}
    TOS2 = {'ip_dscp', 'ip_cu'}
    user_args = set(user_kwargs.keys())
    # Arguments unique to one style must not be combined with arguments unique to another.
    if user_args & (TOS1 - TOS0) and user_args & (TOS0 - TOS1):
        raise STLError('You have mixed %s and %s TOS parameters' % (TOS0, TOS1))
    if user_args & (TOS2 - TOS0) and user_args & (TOS0 - TOS2):
        raise STLError('You have mixed %s and %s TOS parameters' % (TOS0, TOS2))
    if user_args & (TOS2 - TOS1) and user_args & (TOS1 - TOS2):
        raise STLError('You have mixed %s and %s TOS parameters' % (TOS1, TOS2))
    if user_args & (TOS0 - TOS1 - TOS2):
        # precedence(3 bits) | tos_field(4 bits) | mbz(1 bit)
        return (kwargs['ip_precedence'] << 5) + (kwargs['ip_tos_field'] << 1) + kwargs['ip_mbz']
    if user_args & (TOS1 - TOS2):
        # precedence(3 bits) | delay | throughput | reliability | cost | reserved
        return (kwargs['ip_precedence'] << 5) + (kwargs['ip_delay'] << 4) + (kwargs['ip_throughput'] << 3) + (kwargs['ip_reliability'] << 2) + (kwargs['ip_cost'] << 1) + kwargs['ip_reserved']
    # default style: DSCP(6 bits) | CU(2 bits)
    return (kwargs['ip_dscp'] << 2) + kwargs['ip_cu']
def vlan_in_args(user_kwargs):
    """Return True if the user explicitly passed any VLAN-related (vlan_*) argument.

    :param user_kwargs: dict of arguments supplied by the user
    :return: bool
    """
    # any() short-circuits on the first vlan_* key, same as the original loop.
    return any(arg.startswith('vlan_') for arg in user_kwargs)
def split_vlan_arg(vlan_arg):
    """Normalize a single VLAN argument into a list of per-header values.

    A list passes through unchanged, a scalar (integer or None) is wrapped in
    a one-element list, and a string such as '{10 20}' has its curly braces
    stripped and is split on whitespace.

    :raises STLError: for any other argument type
    """
    if type(vlan_arg) is list:
        return vlan_arg
    if is_integer(vlan_arg) or vlan_arg is None:
        return [vlan_arg]
    if type(vlan_arg) is str:
        cleaned = vlan_arg.replace('{', '').replace('}', '')
        return cleaned.strip().split()
    raise STLError('vlan argument invalid (expecting list, int, long, str, None): %s' % vlan_arg)
def split_vlan_args(kwargs):
    """Split the flat vlan_* arguments into one argument dict per 802.1Q header.

    Every vlan_* argument may carry a list of per-header values; the number of
    headers is the length of the longest such list.  Positions a shorter list
    does not cover fall back to the defaults in traffic_config_kwargs.
    """
    per_arg_values = {arg: split_vlan_arg(value)
                      for arg, value in kwargs.items()
                      if arg.startswith('vlan_')}
    headers_count = max(len(values) for values in per_arg_values.values())
    result = []
    for i in range(headers_count):
        header_args = {}
        for arg, values in per_arg_values.items():
            header_args[arg] = values[i] if i < len(values) else traffic_config_kwargs[arg]
        result.append(header_args)
    return result
def correct_direction(user_kwargs, kwargs):
    """Swap direction-dependent arguments for the returning (direction != 0) side.

    For the reverse direction the secondary MAC addresses are used and every
    L3 src/dst argument pair is exchanged, so one argument set can describe
    both directions of a bidirectional stream.

    :param user_kwargs: dict mutated in place with the corrected values
    :param kwargs: full argument dict (with defaults) used as the source of values
    """
    if kwargs['direction'] == 0:
        return
    user_kwargs['mac_src'] = kwargs['mac_src2']
    user_kwargs['mac_dst'] = kwargs['mac_dst2']
    if kwargs['l3_protocol'] == 'ipv4':
        src_prefix, dst_prefix = 'ip_src_', 'ip_dst_'
    elif kwargs['l3_protocol'] == 'ipv6':
        src_prefix, dst_prefix = 'ipv6_src_', 'ipv6_dst_'
    else:
        return
    # startswith() is safer than substring matching: it guarantees the slice
    # below removes exactly the prefix.
    for arg in kwargs:
        if arg.startswith(src_prefix):
            dst_arg = dst_prefix + arg[len(src_prefix):]
            # exchange the pair: src args receive dst values and vice versa
            user_kwargs[arg], user_kwargs[dst_arg] = kwargs[dst_arg], kwargs[arg]
# we produce packets without FCS, so need to reduce sizes of L2 arguments
def correct_sizes(kwargs):
    """Shrink every integer *_size/*_size_min/*_size_max argument by the 4-byte FCS, in place."""
    fcs_len = 4
    for name, value in kwargs.items():
        if is_integer(value):
            if name.endswith(('_size', '_size_min', '_size_max')):
                kwargs[name] = value - fcs_len
| {
"content_hash": "548b112b35275ac8d18881a1c19b310d",
"timestamp": "",
"source": "github",
"line_count": 1613,
"max_line_length": 225,
"avg_line_length": 52.32734035957843,
"alnum_prop": 0.49880337424766596,
"repo_name": "dimagol/trex-core",
"id": "964dd617d74c7541b50eee221c9156f50410c299",
"size": "84426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_hltapi.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "16544616"
},
{
"name": "C++",
"bytes": "4926141"
},
{
"name": "CMake",
"bytes": "8882"
},
{
"name": "CSS",
"bytes": "333"
},
{
"name": "HTML",
"bytes": "5012"
},
{
"name": "JavaScript",
"bytes": "1234"
},
{
"name": "Makefile",
"bytes": "164932"
},
{
"name": "Python",
"bytes": "13495826"
},
{
"name": "Shell",
"bytes": "22288"
}
],
"symlink_target": ""
} |
"""Resolve JSON for OpenAIRE grants."""
from __future__ import absolute_import, print_function
import jsonresolver
from invenio_pidstore.resolver import Resolver
from invenio_records.api import Record
from werkzeug.routing import Rule
def resolve_grant_endpoint(doi_grant_code):
    """Resolve an OpenAIRE grant record from its DOI grant code.

    :param doi_grant_code: grant code portion of the DOI; it is appended to
        the FundRef prefix ``10.13039/`` to form the full PID value.
    :return: the resolved grant record.
    :raises Exception: re-raises whatever the resolver raised, after logging.
    """
    # jsonresolver will evaluate current_app on import if outside of function.
    from flask import current_app
    pid_value = '10.13039/{0}'.format(doi_grant_code)
    try:
        _, record = Resolver(pid_type='grant', object_type='rec',
                             getter=Record.get_record).resolve(pid_value)
        return record
    except Exception:
        # Log message grammar fixed ("does not exists" -> "does not exist").
        current_app.logger.error(
            'Grant {0} does not exist.'.format(pid_value), exc_info=True)
        raise
@jsonresolver.hookimpl
def jsonresolver_loader(url_map):
    """Register the OpenAIRE grants endpoint on the JSON resolver URL map."""
    # Imported here so current_app is only touched inside an app context.
    from flask import current_app
    url_map.add(Rule(
        '/grants/10.13039/<path:doi_grant_code>',
        endpoint=resolve_grant_endpoint,
        host=current_app.config['OPENAIRE_JSONRESOLVER_GRANTS_HOST']))
| {
"content_hash": "e8e6df37998ab58705fb24516af8f7d0",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 78,
"avg_line_length": 33.90909090909091,
"alnum_prop": 0.6791778373547811,
"repo_name": "inveniosoftware/invenio-openaire",
"id": "33b8a9429aa88e6ec10b7e642ecbb23e474984a2",
"size": "1354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invenio_openaire/resolvers/grants.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "77471"
},
{
"name": "Shell",
"bytes": "466"
}
],
"symlink_target": ""
} |
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth.views import login, logout
# URL routing for the project: admin, built-in auth views, then the attendance app.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # Login/logout use Django's built-in auth views; login renders a custom template,
    # logout redirects to the site root.
    url(r'^login/?$', login, dict(template_name='login.html'), name='login'),
    url(r'^logout/$', logout, {'next_page': '/'}, name='logout'),
    # Everything else is delegated to the attendance app's URLconf.
    url(r'', include('attendance.urls')),
]
| {
"content_hash": "a72c5ee98a9fad96bcc9a5c086ff3534",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 77,
"avg_line_length": 28.692307692307693,
"alnum_prop": 0.6568364611260054,
"repo_name": "seccom-ufsc/hertz",
"id": "0b2130d855472dead01c65cbd6cadd1e4a21e1a1",
"size": "373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hertz/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "8943"
},
{
"name": "Python",
"bytes": "13670"
},
{
"name": "Shell",
"bytes": "125"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import router
class Operation(object):
    """
    Base class for migration operations.
    It's responsible for both mutating the in-memory model state
    (see db/migrations/state.py) to represent what it performs, as well
    as actually performing it against a live database.
    Note that some operations won't modify memory state at all (e.g. data
    copying operations), and some will need their modifications to be
    optionally specified by the user (e.g. custom Python code snippets)
    Due to the way this class deals with deconstruction, it should be
    considered immutable.
    """
    # If this migration can be run in reverse.
    # Some operations are impossible to reverse, like deleting data.
    reversible = True
    # Can this migration be represented as SQL? (things like RunPython cannot)
    reduces_to_sql = True
    # Should this operation be forced as atomic even on backends with no
    # DDL transaction support (i.e., does it have no DDL, like RunPython)
    atomic = False
    # NOTE(review): appears to list constructor arguments that get expanded
    # during serialization — confirm against the migration writer's usage.
    serialization_expand_args = []
    def __new__(cls, *args, **kwargs):
        # We capture the arguments to make returning them trivial
        self = object.__new__(cls)
        self._constructor_args = (args, kwargs)
        return self
    def deconstruct(self):
        """
        Returns a 3-tuple of class import path (or just name if it lives
        under django.db.migrations), positional arguments, and keyword
        arguments.
        """
        return (
            self.__class__.__name__,
            self._constructor_args[0],
            self._constructor_args[1],
        )
    def state_forwards(self, app_label, state):
        """
        Takes the state from the previous migration, and mutates it
        so that it matches what this migration would perform.
        """
        raise NotImplementedError('subclasses of Operation must provide a state_forwards() method')
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        """
        Performs the mutation on the database schema in the normal
        (forwards) direction.
        """
        raise NotImplementedError('subclasses of Operation must provide a database_forwards() method')
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        """
        Performs the mutation on the database schema in the reverse
        direction - e.g. if this were CreateModel, it would in fact
        drop the model's table.
        """
        raise NotImplementedError('subclasses of Operation must provide a database_backwards() method')
    def describe(self):
        """
        Outputs a brief summary of what the action does.
        """
        return "%s: %s" % (self.__class__.__name__, self._constructor_args)
    def references_model(self, name, app_label=None):
        """
        Returns True if there is a chance this operation references the given
        model name (as a string), with an optional app label for accuracy.
        Used for optimization. If in doubt, return True;
        returning a false positive will merely make the optimizer a little
        less efficient, while returning a false negative may result in an
        unusable optimized migration.
        """
        return True
    def references_field(self, model_name, name, app_label=None):
        """
        Returns True if there is a chance this operation references the given
        field name, with an optional app label for accuracy.
        Used for optimization. If in doubt, return True.
        """
        # Conservative default: a field reference implies a model reference.
        return self.references_model(model_name, app_label)
    def allowed_to_migrate(self, connection_alias, model, hints=None):
        """
        Returns if we're allowed to migrate the model.
        """
        # Always skip if proxy, swapped out, or unmanaged.
        if model and (model._meta.proxy or model._meta.swapped or not model._meta.managed):
            return False
        # Extra hints (if any) are forwarded to the database router.
        return router.allow_migrate(connection_alias, model, **(hints or {}))
    def __repr__(self):
        # Renders the class name followed by the captured constructor
        # positional and keyword arguments.
        return "<%s %s%s>" % (
            self.__class__.__name__,
            ", ".join(map(repr, self._constructor_args[0])),
            ",".join(" %s=%r" % x for x in self._constructor_args[1].items()),
        )
| {
"content_hash": "ea72eb4f318f37cccf3f30efa3ceed97",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 103,
"avg_line_length": 37.085470085470085,
"alnum_prop": 0.6388568794653146,
"repo_name": "memtoko/django",
"id": "557c956732fd9e70382ec6782508ee3688496ba0",
"size": "4339",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/db/migrations/operations/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52479"
},
{
"name": "JavaScript",
"bytes": "106009"
},
{
"name": "Makefile",
"bytes": "5765"
},
{
"name": "Python",
"bytes": "10489436"
},
{
"name": "Shell",
"bytes": "10452"
}
],
"symlink_target": ""
} |
import numpy
from copy import deepcopy
# All State objects encountered so far; Node.isSafe consults and appends to
# this list to prune configurations that were already explored.
visitedStates = []
# Monotonic counter handing out a unique id to every Node created.
nodeNumber = 0
class State():
    """Canonical description of one puzzle configuration.

    Two states compare equal when the elevator floor and the per-floor
    counts of unpaired chips, unpaired generators and total unpaired items
    all match, so configurations that only permute equivalent elements are
    treated as the same state.
    """
    def __init__(self, floor, genM, chM):
        self.floor = floor
        self.genM = genM
        self.chM = chM
        # per-floor number of matched generator/chip pairs
        self.pairVector = numpy.sum(numpy.logical_and(genM, chM), 1)
        # per-floor counts of items missing their counterpart on that floor
        self.singleChipVector = numpy.sum(chM, 1) - self.pairVector
        self.singleGenVector = numpy.sum(genM, 1) - self.pairVector
        self.singleAllVector = self.singleChipVector + self.singleGenVector
    def __eq__(self, other):
        if self.floor != other.floor:
            return False
        same_totals = (self.singleAllVector == other.singleAllVector).all()
        same_chips = (self.singleChipVector == other.singleChipVector).all()
        same_gens = (self.singleGenVector == other.singleGenVector).all()
        return bool(same_totals and same_chips and same_gens)
class Node():
    def __init__(self, depth, floor, genM, chM, parentNode):
        """Create a search-tree node for one elevator configuration.

        :param depth: number of moves taken to reach this configuration
        :param floor: current elevator floor (0-3)
        :param genM: 4x7 generator position matrix (floor x element), copied
        :param chM: 4x7 microchip position matrix (floor x element), copied
        :param parentNode: parent in the search tree, or None for the root
        :raises Exception: if the total item count is not 14, or if this
            node already is the winning configuration
        """
        self.depth = depth
        self.floor = floor
        # deep copies so mutations while generating children never leak
        # into an already-constructed node
        self.genM = deepcopy(genM)
        self.chM = deepcopy(chM)
        self.visited = False
        if parentNode is not None:
            self.parentNode = parentNode
        else:
            # the root points to itself, so parent chains (e.g.
            # parentNode.parentNode in isSafe) never dereference None
            self.parentNode = self
        global nodeNumber
        self.nodeNumber = nodeNumber
        nodeNumber += 1
        self.childs = []
        # print('New node: # {}'.format(self.nodeNumber))
        # visitedMatrices.append(self.genM+self.chM)
        # Sanity check: moves only relocate items, so the total count of
        # chips plus generators must stay at 14 (7 pairs).
        if numpy.sum(self.chM) + numpy.sum(self.genM) != 14:
            print(self.nodeNumber, self.floor)
            print('genM, chM')
            print(self.genM)
            print(self.chM)
            raise Exception("something is wrong!")
        # The search terminates by exception as soon as a goal node is built.
        if self.isWinner():
            raise Exception("found a winner at it. {}".format(self.depth))
    def isSafe(self):
        """Return True if this configuration is legal and worth exploring.

        A configuration is rejected when (a) any floor holds an unprotected
        chip together with at least one generator, (b) it exactly undoes the
        previous move (matches the grandparent node), or (c) an equivalent
        State was already visited.  A surviving configuration is recorded in
        the global visitedStates list as a side effect.
        """
        # A chip is protected only when its own generator is on the same floor.
        chipsWithProtection = numpy.multiply(self.genM, self.chM)
        chipsWithoutProtection = self.chM-chipsWithProtection
        vulnerableChipNumberPerFloor = numpy.sum(chipsWithoutProtection, 1)
        generatorNumberPerFloor = numpy.sum(self.genM, 1)
        # leftover debugging hook for a specific node id
        if self.nodeNumber == 2:
            # print(' node {} info: '.format(self.nodeNumber))
            # print(self.genM)
            # print(self.chM)
            # input()
            pass
        # Any floor with both a vulnerable chip and a generator fries the chip.
        if ((numpy.multiply(vulnerableChipNumberPerFloor,
                            generatorNumberPerFloor)).any()):
            return False
        else:
            # Reject moves that exactly restore the grandparent's configuration
            # (i.e. immediately undoing the previous move).
            if (self.floor == self.parentNode.parentNode.floor and
                (self.chM == self.parentNode.parentNode.chM).all() and
                (self.genM == self.parentNode.parentNode.genM).all()):
                # print('bop')
                return False
            # Deduplicate against all previously visited equivalent states.
            aState = State(self.floor, self.genM, self.chM)
            if aState in visitedStates:
                return False
            else:
                visitedStates.append(aState)
                return True
def isWinner(self):
return (numpy.sum(self.chM, 1)[3] == 7 and
numpy.sum(self.genM, 1)[3] == 7)
def grow(self):
if not self.visited:
possibleFloors = [self.floor+1, self.floor-1]
if possibleFloors[0] > 3:
possibleFloors.pop(0)
elif possibleFloors[1] < 0:
possibleFloors.pop(1)
# 1: try to move only a generator or two
for pos1 in range(7):
for pos2 in range(pos1, 7):
genPosVec = numpy.logical_or(numpy.array(range(7)) == pos1,
numpy.array(range(7)) == pos2)
genPosVec = genPosVec*1
if (numpy.sum(numpy.multiply(genPosVec,
self.genM[self.floor]))
== numpy.sum(genPosVec)).all():
subGenM = self.genM
# print(self.genM)
subGenM[self.floor] = self.genM[self.floor]-genPosVec
# print(subGenM)
for floor in possibleFloors:
subGenM[floor] = self.genM[floor]+genPosVec
newChild = Node(self.depth+1, floor, subGenM,
self.chM, self)
if newChild.isSafe():
self.childs.append(newChild)
subGenM[floor] = self.genM[floor]-genPosVec
subGenM[self.floor] = self.genM[self.floor]+genPosVec
# 2: try to move only a chip
for pos1 in range(7):
for pos2 in range(pos1, 7):
chPosVec = (numpy.array(range(7)) == pos1)*1
chPosVec = numpy.logical_or(numpy.array(range(7)) == pos1,
numpy.array(range(7)) == pos2)
chPosVec = chPosVec*1
if (numpy.sum(numpy.multiply(chPosVec,
self.chM[self.floor]))
== numpy.sum(chPosVec)).all():
subChM = self.chM
# print(self.genM)
subChM[self.floor] = self.chM[self.floor]-chPosVec
# print(subGenM)
for floor in possibleFloors:
subChM[floor] = self.chM[floor]+chPosVec
newChild = Node(self.depth+1, floor, self.genM,
subChM, self)
if newChild.isSafe():
self.childs.append(newChild)
subChM[floor] = self.chM[floor]-chPosVec
subChM[self.floor] = self.chM[self.floor]+chPosVec
# 3: try to move a chip and a generator
for pos1 in range(7):
for pos2 in range(7):
genPosVec = (numpy.array(range(7)) == pos1)*1
chPosVec = (numpy.array(range(7)) == pos2)*1
if (numpy.multiply(genPosVec,
self.genM[self.floor]).any() and
numpy.multiply(chPosVec, self.chM[self.floor]).any()):
subGenM = self.genM
subChM = self.chM
subGenM[self.floor] = self.genM[self.floor]-genPosVec
subChM[self.floor] = self.chM[self.floor]-chPosVec
for floor in possibleFloors:
subGenM[floor] = self.genM[floor]+genPosVec
subChM[floor] = self.chM[floor]+chPosVec
newChild = Node(self.depth+1, floor, subGenM,
subChM, self)
if newChild.isSafe():
self.childs.append(newChild)
subGenM[floor] = self.genM[floor]-genPosVec
subChM[floor] = self.chM[floor]-chPosVec
subGenM[self.floor] = self.genM[self.floor]+genPosVec
subChM[self.floor] = self.chM[self.floor]+chPosVec
global treatedNode
treatedNode += 1
# print('finished treating node {}'.format(self.nodeNumber))
# print('number of children: {}'.format(len(self.childs)))
# print('parent node: {}'.format(self.parentNode.nodeNumber))
# print(self.genM)
# print(self.chM)
self.visited = True
else:
# print("I am node {} with {} childs".format(self.nodeNumber,
# len(self.childs)))
# print(self.genM)
# print(self.chM)
for child in self.childs:
child.grow()
# promethium, cobalt, curium, ruthenium, plutonium
# Initial layout: one row per floor (0 = bottom), one column per element.
theInputGenerators = [[True, False, False, False, False, True, True],
                      [False, True, True, True, True, False, False],
                      [False, False, False, False, False, False, False],
                      [False, False, False, False, False, False, False]]
theInputChips = [[True, False, False, False, False, True, True],
                 [False, False, False, False, False, False, False],
                 [False, True, True, True, True, False, False],
                 [False, False, False, False, False, False, False]]
tIG = numpy.array(theInputGenerators, dtype=int)
tIC = numpy.array(theInputChips, dtype=int)
currentFloor = 1
rootNode = Node(0, 0, tIG, tIC, None)
treatedNode = 0
# Iterative deepening: each rootNode.grow() call deepens the tree by one
# level; Node.__init__ raises as soon as a winning state is constructed,
# which terminates the search with the answer in the exception message.
for i in range(60):
    print('iteration {}'.format(i))
    treatedNode = 0
    rootNode.grow()
    # No node was expanded this round: the search space is exhausted.
    if treatedNode == 0:
        print('done at iteration {}'.format(i))
        break
| {
"content_hash": "71c8a82983bf83298f698879c60af26e",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 79,
"avg_line_length": 44.27272727272727,
"alnum_prop": 0.499729817356533,
"repo_name": "znuxor/aoc2016",
"id": "07a6d3b4540db7c8c813dc8c69660b4a517d8d73",
"size": "9276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "11b.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "566855"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import re
import kconfiglib
# Indentation to be used in the generated file
INDENT = ' '
# Characters used when underlining section heading
HEADING_SYMBOLS = '#*=-^"+'
# Keep the heading level in sync with api-reference/kconfig.rst
INITIAL_HEADING_LEVEL = 3
MAX_HEADING_LEVEL = len(HEADING_SYMBOLS) - 1
def write_docs(config, filename):
    """Generate ReST documentation for every menu item of *config*.

    Note: the current values of config items are ignored when writing
    .rst documentation, i.e. the --config option can be ignored.
    (However at time of writing it still needs to be set to something...)
    """
    with open(filename, "w") as output:
        config.walk_menu(lambda node: write_menu_item(output, node))
def node_is_menu(node):
    """Return True if *node* represents a menu or menuconfig entry."""
    try:
        is_plain_menu = (node.item == kconfiglib.MENU)
        return is_plain_menu or node.is_menuconfig
    except AttributeError:
        # not all MenuNodes have is_menuconfig for some reason
        return False
def get_breadcrumbs(node):
    """Build a ' > ' separated trail of :ref: links to *node*'s ancestors."""
    # Recomputed from scratch on every call — a bit wasteful, but simple.
    crumbs = []
    node = node.parent
    while node.parent:
        if node.prompt:
            crumbs.insert(0, ":ref:`%s`" % get_link_anchor(node))
        node = node.parent
    return " > ".join(crumbs)
def get_link_anchor(node):
    """Return the ReST link anchor for *node*.

    Config symbols use their ``CONFIG_<name>`` form directly. Menus have
    no ``item.name``, so their anchor is built from the sanitized prompts
    of all ancestors, joined with '-' and lowercased.
    """
    try:
        return "CONFIG_%s" % node.item.name
    except AttributeError:
        assert(node_is_menu(node))  # only menus should have no item.name

    # for menus, build a link anchor out of the parents
    result = []
    while node.parent:
        if node.prompt:
            # Collapse every run of non-alphanumeric characters into '-'.
            # Fixed regex: the previous class [^a-zA-z0-9] used 'A-z',
            # which also matches '[', '\\', ']', '^', '_' and '`', letting
            # those characters leak into generated anchors.
            result = [re.sub(r"[^a-zA-Z0-9]+", "-", node.prompt[0])] + result
        node = node.parent
    result = "-".join(result).lower()
    return result
def get_heading_level(node):
    """Compute the ReST heading level for *node* from its menu depth."""
    level = INITIAL_HEADING_LEVEL
    ancestor = node.parent
    while ancestor.parent:
        level += 1
        # Clamp: never exceed the deepest underline symbol available.
        if level == MAX_HEADING_LEVEL:
            return MAX_HEADING_LEVEL
        ancestor = ancestor.parent
    return level
def format_rest_text(text, indent):
    """Indent *text* and escape it for safe inclusion in ReST output."""
    # Prefix every line of the block with the requested indent.
    text = indent + text.replace('\n', '\n' + indent)
    # Escape characters ReST would interpret as inline formatting.
    for char, escaped in (("*", "\\*"), ("_", "\\_")):
        text = text.replace(char, escaped)
    # Rewrite absolute links to the docs site as relative :doc: references.
    text = re.sub(r'https://docs.espressif.com/projects/esp-idf/\w+/\w+/(.+)\.html', r':doc:`../\1`', text)
    return text + '\n'
def node_should_write(node):
    """Decide whether *node* gets its own section in the generated docs."""
    if not node.prompt:
        # Invisible menu items produce no documentation.
        return False
    # Choice members are documented inline with their parent choice node.
    return not isinstance(node.parent.item, kconfiglib.Choice)
def write_menu_item(f, node):
    """Write one menu node as a ReST section to file object *f*.

    Emits a link anchor, an underlined heading, the prompt line, a
    breadcrumb trail, the re-indented help text, the option list for
    choice nodes and, for menus, a list of links to the children.
    """
    if not node_should_write(node):
        return
    try:
        name = node.item.name
    except AttributeError:
        name = None
    is_menu = node_is_menu(node)
    # Heading
    if name:
        title = 'CONFIG_%s' % name
    else:
        # if no symbol name, use the prompt as the heading
        title = node.prompt[0]
    f.write(".. _%s:\n\n" % get_link_anchor(node))
    f.write('%s\n' % title)
    # ReST underlines must be at least as long as the title text.
    f.write(HEADING_SYMBOLS[get_heading_level(node)] * len(title))
    f.write('\n\n')
    if name:
        f.write('%s%s\n\n' % (INDENT, node.prompt[0]))
        f.write('%s:emphasis:`Found in:` %s\n\n' % (INDENT, get_breadcrumbs(node)))
    try:
        if node.help:
            # Help text normally contains newlines, but spaces at the beginning of
            # each line are stripped by kconfiglib. We need to re-indent the text
            # to produce valid ReST.
            f.write(format_rest_text(node.help, INDENT))
    except AttributeError:
        pass  # No help
    if isinstance(node.item, kconfiglib.Choice):
        f.write('%sAvailable options:\n' % INDENT)
        choice_node = node.list
        while choice_node:
            # Format available options as a list
            f.write('%s- %-20s (%s)\n' % (INDENT * 2, choice_node.prompt[0], choice_node.item.name))
            if choice_node.help:
                HELP_INDENT = INDENT * 2
                fmt_help = format_rest_text(choice_node.help, ' ' + HELP_INDENT)
                f.write('%s \n%s\n' % (HELP_INDENT, fmt_help))
            choice_node = choice_node.next
    f.write('\n\n')
    if is_menu:
        # enumerate links to child items
        first = True
        child = node.list
        while child:
            try:
                if node_should_write(child):
                    if first:
                        # Only emit the "Contains:" header when there is
                        # at least one visible child.
                        f.write("Contains:\n\n")
                        first = False
                    f.write('- :ref:`%s`\n' % get_link_anchor(child))
            except AttributeError:
                pass
            child = child.next
        f.write('\n')
if __name__ == '__main__':
    # Not a standalone tool: confgen.py drives write_docs() with a parsed
    # Kconfig tree.
    print("Run this via 'confgen.py --output doc FILENAME'")
| {
"content_hash": "f0aa9ee057719c71e10e98593e8145f3",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 107,
"avg_line_length": 30.79245283018868,
"alnum_prop": 0.5892565359477124,
"repo_name": "espressif/ESP8266_RTOS_SDK",
"id": "6d6d7b1fafe81ab62a6e26a00a0c1975432da1f3",
"size": "5806",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/kconfig_new/gen_kconfig_doc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "48701"
},
{
"name": "Batchfile",
"bytes": "8453"
},
{
"name": "C",
"bytes": "16675480"
},
{
"name": "C++",
"bytes": "788226"
},
{
"name": "CMake",
"bytes": "197769"
},
{
"name": "Dockerfile",
"bytes": "2032"
},
{
"name": "Inno Setup",
"bytes": "3663"
},
{
"name": "Lex",
"bytes": "7800"
},
{
"name": "M4",
"bytes": "98046"
},
{
"name": "Makefile",
"bytes": "159517"
},
{
"name": "Pascal",
"bytes": "52308"
},
{
"name": "Perl",
"bytes": "15204"
},
{
"name": "Python",
"bytes": "1161765"
},
{
"name": "Shell",
"bytes": "100056"
},
{
"name": "Smarty",
"bytes": "5972"
},
{
"name": "VBScript",
"bytes": "294"
},
{
"name": "Yacc",
"bytes": "15875"
}
],
"symlink_target": ""
} |
# Use the real top-level `sys` module. The previous `from os import sys`
# only worked because `os` happens to import `sys` internally — an
# undocumented implementation detail.
import sys

try:
    # scikit-build provides the CMake-aware setup() used below.
    from skbuild import setup
except ImportError:
    # All parts of the error message belong on stderr (the pip hint was
    # previously printed to stdout, inconsistently with the other lines).
    print('scikit-build is required to build from source.', file=sys.stderr)
    print('Please run:', file=sys.stderr)
    print('', file=sys.stderr)
    print(' python -m pip install scikit-build', file=sys.stderr)
    sys.exit(1)
# Cookiecutter template: the {{ ... }} placeholders are substituted when
# a new ITK external module project is generated from this template.
setup(
    name='{{ cookiecutter.python_package_name }}',
    version='0.1.0',
    author='{{ cookiecutter.full_name }}',
    author_email='{{ cookiecutter.email }}',
    packages=['itk'],
    package_dir={'itk': 'itk'},
    download_url=r'{{ cookiecutter.download_url }}',
    description=r'{{ cookiecutter.project_short_description }}',
    long_description='{{ cookiecutter.project_long_description }}',
    classifiers=[
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        "Programming Language :: C++",
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "Intended Audience :: Healthcare Industry",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering",
        "Topic :: Scientific/Engineering :: Medical Science Apps.",
        "Topic :: Scientific/Engineering :: Information Analysis",
        "Topic :: Software Development :: Libraries",
        "Operating System :: Android",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX",
        "Operating System :: Unix",
        "Operating System :: MacOS"
    ],
    license='Apache',
    keywords='ITK InsightToolkit',
    url=r'https://itk.org/',
    # The generated module is built against the ITK wheels.
    install_requires=[
        r'itk>=5.3rc3'
    ]
)
| {
"content_hash": "9b26065e55025163152ed3cdb3edfb71",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 76,
"avg_line_length": 35.638297872340424,
"alnum_prop": 0.6143283582089553,
"repo_name": "thewtex/ITKModuleTemplate",
"id": "cb05a9689e0800b74ba2392a13f35f816360f04a",
"size": "1699",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "{{cookiecutter.project_name}}/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "13798"
},
{
"name": "CMake",
"bytes": "2649"
},
{
"name": "Python",
"bytes": "1699"
}
],
"symlink_target": ""
} |
"""Basic arithmetic operators.
See the @{$python/math_ops} guide.
@@add
@@subtract
@@multiply
@@scalar_mul
@@div
@@divide
@@truediv
@@floordiv
@@realdiv
@@truncatediv
@@floor_div
@@truncatemod
@@floormod
@@mod
@@cross
@@add_n
@@abs
@@negative
@@sign
@@reciprocal
@@square
@@round
@@sqrt
@@rsqrt
@@pow
@@exp
@@expm1
@@log
@@log1p
@@ceil
@@floor
@@maximum
@@minimum
@@cos
@@sin
@@lbeta
@@tan
@@acos
@@asin
@@atan
@@lgamma
@@digamma
@@erf
@@erfc
@@squared_difference
@@igamma
@@igammac
@@zeta
@@polygamma
@@betainc
@@rint
@@diag
@@diag_part
@@trace
@@transpose
@@eye
@@matrix_diag
@@matrix_diag_part
@@matrix_band_part
@@matrix_set_diag
@@matrix_transpose
@@matmul
@@norm
@@matrix_determinant
@@matrix_inverse
@@cholesky
@@cholesky_solve
@@matrix_solve
@@matrix_triangular_solve
@@matrix_solve_ls
@@qr
@@self_adjoint_eig
@@self_adjoint_eigvals
@@svd
@@tensordot
@@complex
@@conj
@@imag
@@real
@@fft
@@ifft
@@fft2d
@@ifft2d
@@fft3d
@@ifft3d
@@reduce_sum
@@reduce_prod
@@reduce_min
@@reduce_max
@@reduce_mean
@@reduce_all
@@reduce_any
@@reduce_logsumexp
@@count_nonzero
@@accumulate_n
@@einsum
@@cumsum
@@cumprod
@@segment_sum
@@segment_prod
@@segment_min
@@segment_max
@@segment_mean
@@unsorted_segment_sum
@@unsorted_segment_max
@@sparse_segment_sum
@@sparse_segment_mean
@@sparse_segment_sqrt_n
@@argmin
@@argmax
@@setdiff1d
@@where
@@unique
@@edit_distance
@@invert_permutation
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import state_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import compat
from tensorflow.python.util.deprecation import deprecated
# Aliases for some automatically-generated names.
linspace = gen_math_ops.lin_space  # expose the generated LinSpace op under the conventional name
# pylint: disable=redefined-builtin
# TODO(aselle): deprecate arg_max
def argmax(input, axis=None, name=None, dimension=None):
  # `dimension` is the deprecated spelling of `axis`; the two are mutually
  # exclusive.
  if dimension is not None:
    if axis is not None:
      raise ValueError("Cannot specify both 'axis' and 'dimension'")
    axis = dimension
  if axis is None:
    # Default to reducing over the first axis.
    axis = 0
  return gen_math_ops.arg_max(input, axis, name)


argmax.__doc__ = (gen_math_ops.arg_max.__doc__
                  .replace("dimensions", "axes")
                  .replace("dimension", "axis"))
# TODO(aselle): deprecate arg_min
def argmin(input, axis=None, name=None, dimension=None):
  # `dimension` is the deprecated spelling of `axis`; the two are mutually
  # exclusive.
  if dimension is not None:
    if axis is not None:
      raise ValueError("Cannot specify both 'axis' and 'dimension'")
    axis = dimension
  if axis is None:
    # Default to reducing over the first axis.
    axis = 0
  return gen_math_ops.arg_min(input, axis, name)


argmin.__doc__ = (gen_math_ops.arg_min.__doc__
                  .replace("dimensions", "axes")
                  .replace("dimension", "axis"))
# pylint: enable=redefined-builtin
# pylint: disable=anomalous-backslash-in-string,protected-access
# pylint: disable=g-docstring-has-escape
def abs(x, name=None):
  """Computes the absolute value of a tensor.

  Given a tensor of real numbers `x`, this operation returns a tensor
  containing the absolute value of each element in `x`. For example, if x is
  an input element and y is an output element, this operation computes
  \\\\(y = |x|\\\\). Complex inputs yield their magnitude as the matching
  real dtype.

  Args:
    x: A `Tensor` or `SparseTensor` of type `float32`, `float64`, `int32`, or
      `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` the same size and type as `x` with absolute
      values.
  """
  with ops.name_scope(name, "Abs", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      # Sparse path: transform only the stored values, keep the structure.
      values = x.values
      if values.dtype in (dtypes.complex64, dtypes.complex128):
        abs_values = gen_math_ops._complex_abs(
            values, Tout=values.dtype.real_dtype, name=name)
      else:
        abs_values = gen_math_ops._abs(values, name=name)
      return sparse_tensor.SparseTensor(
          indices=x.indices, values=abs_values, dense_shape=x.dense_shape)
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype in (dtypes.complex64, dtypes.complex128):
      return gen_math_ops._complex_abs(x, Tout=x.dtype.real_dtype, name=name)
    return gen_math_ops._abs(x, name=name)
# pylint: enable=g-docstring-has-escape
class DivideDelegateWithName(object):
  """Use Python2/Python3 division delegation to implement divide for tensors.

  Wraps the left operand so that a subsequent `/` or `//` produces an op
  carrying the caller-supplied name (Tensor operator overloads cannot
  accept one directly).
  """
  def __init__(self, x, name):
    """Construct DivideDelegateWithName.

    Args:
      x: Tensor to use as left operand in operator overloads
      name: The name that is preferred for the op created.
    """
    self.x = x
    self.name = name
  def __truediv__(self, y):
    # Python 3 `/`: always true division.
    return _truediv_python3(self.x, y, self.name)
  def __floordiv__(self, y):
    # `//`: floor division, with the remembered op name.
    return floordiv(self.x, y, self.name)
  def __div__(self, y):
    # Python 2 `/`: legacy division semantics.
    return _div_python2(self.x, y, self.name)
def divide(x, y, name=None):
  """Computes Python style division of `x` by `y`."""
  if name is None:
    return x / y
  # Tensor operator overloads cannot carry a custom op name, so delegate
  # through a helper object that remembers the requested name.
  return DivideDelegateWithName(x, name) / y
def multiply(x, y, name=None):
  # Thin public wrapper over the generated Mul op; the docstring is
  # derived from the op's own documentation below.
  return gen_math_ops._mul(x, y, name)


multiply.__doc__ = gen_math_ops._mul.__doc__.replace("Mul", "`tf.multiply`")
# TODO(aselle): put deprecation in after another round of global code changes
@deprecated(
    "2016-12-30",
    "`tf.mul(x, y)` is deprecated, please use `tf.multiply(x, y)` or `x * y`")
def _mul(x, y, name=None):
  # Deprecated alias for `multiply`; kept for backward compatibility.
  return gen_math_ops._mul(x, y, name)
# Combine the generated op's documentation with whatever docstring
# `_mul` currently carries (which may be None).
_mul.__doc__ = (gen_math_ops._mul.__doc__
                + ("" if _mul.__doc__ is None else _mul.__doc__))
def subtract(x, y, name=None):
  # Thin public wrapper over the generated Sub op; the docstring is
  # derived from the op's own documentation below.
  return gen_math_ops._sub(x, y, name)


subtract.__doc__ = gen_math_ops._sub.__doc__.replace("`Sub`", "`tf.subtract`")
# TODO(aselle): put deprecation in after another round of global code changes
@deprecated(
    "2016-12-30",
    "`tf.sub(x, y)` is deprecated, please use `tf.subtract(x, y)` or `x - y`")
def _sub(x, y, name=None):
  # Deprecated alias for `subtract`; kept for backward compatibility.
  return gen_math_ops._sub(x, y, name)
# Combine the generated op's documentation with whatever docstring
# `_sub` currently carries (which may be None).
_sub.__doc__ = (gen_math_ops._sub.__doc__
                + ("" if _sub.__doc__ is None else _sub.__doc__))
# pylint: disable=g-docstring-has-escape
def negative(x, name=None):
  """Computes numerical negative value element-wise.

  I.e., \\(y = -x\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types:
      `half`, `float32`, `float64`, `int32`, `int64`, `complex64`,
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  with ops.name_scope(name, "Neg", [x]) as name:
    if not isinstance(x, sparse_tensor.SparseTensor):
      return gen_math_ops._neg(x, name=name)
    # Sparse path: negate only the stored values, keep the structure.
    negated = gen_math_ops._neg(x.values, name=name)
    return sparse_tensor.SparseTensor(
        indices=x.indices, values=negated, dense_shape=x.dense_shape)
# pylint: enable=g-docstring-has-escape
# pylint: disable=g-docstring-has-escape
@deprecated(
    "2016-12-30",
    "`tf.neg(x)` is deprecated, please use `tf.negative(x)` or `-x`")
def _neg(x, name=None):
  """Computes numerical negative value element-wise.

  Deprecated alias for `negative`.

  I.e., \\(y = -x\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  return negative(x, name)
# pylint: enable=g-docstring-has-escape
def sign(x, name=None):
  """Returns an element-wise indication of the sign of a number.

  `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.

  For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types:
      `half`, `float32`, `float64`, `int32`, `int64`, `complex64`,
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  with ops.name_scope(name, "Sign", [x]) as name:
    if not isinstance(x, sparse_tensor.SparseTensor):
      return gen_math_ops.sign(x, name=name)
    # Sparse path: apply the op to the values, keep the structure.
    signed = gen_math_ops.sign(x.values, name=name)
    return sparse_tensor.SparseTensor(
        indices=x.indices, values=signed, dense_shape=x.dense_shape)
def square(x, name=None):
  """Computes square of x element-wise.

  I.e., y = x * x = x^2.

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types:
      `half`, `float32`, `float64`, `int32`, `int64`, `complex64`,
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Square", [x]) as name:
    if not isinstance(x, sparse_tensor.SparseTensor):
      return gen_math_ops.square(x, name=name)
    # Sparse path: square only the stored values, keep the structure.
    squared = gen_math_ops.square(x.values, name=name)
    return sparse_tensor.SparseTensor(
        indices=x.indices, values=squared, dense_shape=x.dense_shape)
def sqrt(x, name=None):
  """Computes square root of x element-wise.

  I.e., y = sqrt(x) = x^(1/2).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types:
      `half`, `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  with ops.name_scope(name, "Sqrt", [x]) as name:
    if not isinstance(x, sparse_tensor.SparseTensor):
      return gen_math_ops.sqrt(x, name=name)
    # Sparse path: apply the op to the values, keep the structure.
    roots = gen_math_ops.sqrt(x.values, name=name)
    return sparse_tensor.SparseTensor(
        indices=x.indices, values=roots, dense_shape=x.dense_shape)
def erf(x, name=None):
  """Computes the Gauss error function of `x` element-wise.

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types:
      `half`, `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  with ops.name_scope(name, "Erf", [x]) as name:
    if not isinstance(x, sparse_tensor.SparseTensor):
      return gen_math_ops.erf(x, name=name)
    # Sparse path: apply the op to the values, keep the structure.
    erf_values = gen_math_ops.erf(x.values, name=name)
    return sparse_tensor.SparseTensor(
        indices=x.indices, values=erf_values, dense_shape=x.dense_shape)
def scalar_mul(scalar, x):
  """Multiplies a scalar times a `Tensor` or `IndexedSlices` object.

  Intended for use in gradient code which might deal with `IndexedSlices`
  objects, which are easy to multiply by a scalar but more expensive to
  multiply with arbitrary tensors.

  Args:
    scalar: A 0-D scalar `Tensor`. Must have known shape.
    x: A `Tensor` or `IndexedSlices` to be scaled.

  Returns:
    `scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.

  Raises:
    ValueError: if scalar is not a 0-D `scalar`.
  """
  scalar = ops.convert_to_tensor(
      scalar, dtype=x.dtype.base_dtype, name="scalar")
  shape = scalar.get_shape()
  # Reject anything that is not a genuine scalar up front.
  if shape.ndims != 0:
    raise ValueError("Only scalar multiply works, got shape %s" % shape)
  if isinstance(x, ops.IndexedSlices):
    # Scale only the values; indices and dense shape are unchanged.
    return ops.IndexedSlices(scalar * x.values, x.indices, x.dense_shape)
  return scalar * x
def pow(x, y, name=None):
  """Computes the power of one value to another.

  Given tensors `x` and `y`, computes x^y for corresponding elements.
  For example:

  ```
  # tensor 'x' is [[2, 2], [3, 3]]
  # tensor 'y' is [[8, 16], [2, 3]]
  tf.pow(x, y) ==> [[256, 65536], [9, 27]]
  ```

  Args:
    x: A `Tensor` of type `float32`, `float64`, `int32`, `int64`,
      `complex64`, or `complex128`.
    y: A `Tensor` of type `float32`, `float64`, `int32`, `int64`,
      `complex64`, or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.
  """
  with ops.name_scope(name, "Pow", [x]) as name:
    # Delegate directly to the generated Pow op.
    return gen_math_ops._pow(x, y, name=name)
# pylint: disable=redefined-builtin,redefined-outer-name
def complex(real, imag, name=None):
  r"""Converts two real numbers to a complex number.

  Given a tensor `real` for the real parts and a tensor `imag` for the
  imaginary parts, returns complex numbers of the form \\(a + bj\\)
  elementwise. `real` and `imag` must have the same shape.

  For example:

  ```
  # tensor 'real' is [2.25, 3.25]
  # tensor `imag` is [4.75, 5.75]
  tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
  ```

  Args:
    real: A `Tensor`. Must be one of the following types: `float32`,
      `float64`.
    imag: A `Tensor`. Must have the same type as `real`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `complex64` or `complex128`.
  """
  real = ops.convert_to_tensor(real, name="real")
  imag = ops.convert_to_tensor(imag, name="imag")
  with ops.name_scope(name, "Complex", [real, imag]) as name:
    # Both inputs must share one float dtype, which fixes the output dtype.
    if real.dtype == dtypes.float32 and imag.dtype == dtypes.float32:
      Tout = dtypes.complex64
    elif real.dtype == dtypes.float64 and imag.dtype == dtypes.float64:
      Tout = dtypes.complex128
    else:
      raise TypeError("real and imag have incorrect types: "
                      "{} {}".format(real.dtype.name,
                                     imag.dtype.name))
    return gen_math_ops._complex(real, imag, Tout=Tout, name=name)
def real(input, name=None):
  r"""Returns the real part of a complex number.

  Given a tensor `input` of complex numbers, returns a tensor of type
  `float32` or `float64` holding the real part of each element. Elements
  of `input` have the form \\(a + bj\\); this operation returns *a*.

  For example:

  ```
  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
  tf.real(input) ==> [-2.25, 3.25]
  ```

  If `input` is already real, it is returned unchanged.

  Args:
    input: A `Tensor`. Must have numeric type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Real", [input]) as name:
    real_dtype = input.dtype.real_dtype
    if input.dtype.base_dtype != real_dtype:
      return gen_math_ops.real(input, Tout=real_dtype, name=name)
    # Already a real dtype: nothing to extract.
    return input
def imag(input, name=None):
  r"""Returns the imaginary part of a complex number.

  Given a tensor `input` of complex numbers, returns a tensor of type
  `float32` or `float64` holding the imaginary part of each element.
  Elements of `input` have the form \\(a + bj\\); this operation
  returns *b*.

  For example:

  ```
  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
  tf.imag(input) ==> [4.75, 5.75]
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `complex64`,
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Imag", [input]) as name:
    out_dtype = input.dtype.real_dtype
    return gen_math_ops.imag(input, Tout=out_dtype, name=name)
# pylint: enable=redefined-outer-name,redefined-builtin
def round(x, name=None):
  """Rounds the values of a tensor to the nearest integer, element-wise.

  Rounds half to even. Also known as bankers rounding. If you want to round
  according to the current system rounding mode use tf::cint.

  For example:

  ```python
  # 'a' is [0.9, 2.5, 2.3, 1.5, -4.5]
  tf.round(a) ==> [ 1.0, 2.0, 2.0, 2.0, -4.0 ]
  ```

  Args:
    x: A `Tensor` of type `float32` or `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as `x`.
  """
  x = ops.convert_to_tensor(x, name="x")
  # Integer tensors are already "rounded"; return them untouched.
  if x.dtype.is_integer:
    return x
  return gen_math_ops.round(x, name=name)
def cast(x, dtype, name=None):
  """Casts a tensor to a new type.

  Casts `x` (or, for a `SparseTensor`, its values) to `dtype`.

  For example:

  ```python
  # tensor `a` is [1.8, 2.2], dtype=tf.float
  tf.cast(a, tf.int32) ==> [1, 2]  # dtype=tf.int32
  ```

  Args:
    x: A `Tensor` or `SparseTensor`.
    dtype: The destination type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x`.

  Raises:
    TypeError: If `x` cannot be cast to the `dtype`.
  """
  base_type = dtypes.as_dtype(dtype).base_dtype
  with ops.name_scope(name, "Cast", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      # Cast only the values; indices and shape are dtype-independent.
      values_cast = cast(x.values, base_type, name=name)
      return sparse_tensor.SparseTensor(x.indices, values_cast, x.dense_shape)
    # Note: ops.convert_to_tensor(x, dtype=dtype, ...) would also accept
    # some conversions that cast() must not perform (e.g. numbers to
    # strings), so convert first and cast explicitly.
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype.base_dtype == base_type:
      return x  # Already the requested type.
    return gen_math_ops.cast(x, base_type, name=name)
def saturate_cast(value, dtype, name=None):
  """Performs a safe saturating cast of `value` to `dtype`.

  This function casts the input to `dtype` without applying any scaling. If
  there is a danger that values would over or underflow in the cast, this op
  applies the appropriate clamping before the cast.

  Args:
    value: A `Tensor`.
    dtype: The desired output `DType`.
    name: A name for the operation (optional).

  Returns:
    `value` safely cast to `dtype`.
  """
  # When casting to a type with a smaller representable range, clamp first.
  # Note that this covers casting to unsigned types as well.
  with ops.name_scope(name, "saturate_cast", [value]) as name:
    value = ops.convert_to_tensor(value, name="value")
    dtype = dtypes.as_dtype(dtype).base_dtype
    in_dtype = value.dtype
    if in_dtype.min < dtype.min:
      # Clamp from below so the cast cannot underflow.
      value = gen_math_ops.maximum(
          value,
          ops.convert_to_tensor(dtype.min, dtype=in_dtype, name="min"))
    if in_dtype.max > dtype.max:
      # Clamp from above so the cast cannot overflow.
      value = gen_math_ops.minimum(
          value,
          ops.convert_to_tensor(dtype.max, dtype=in_dtype, name="max"))
    return cast(value, dtype, name=name)
def to_float(x, name="ToFloat"):
  """Casts a tensor to type `float32`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `float32`.

  Raises:
    TypeError: If `x` cannot be cast to the `float32`.
  """
  # Convenience wrapper over the general-purpose cast().
  return cast(x, dtype=dtypes.float32, name=name)
def to_double(x, name="ToDouble"):
  """Casts a tensor to type `float64`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `float64`.

  Raises:
    TypeError: If `x` cannot be cast to the `float64`.
  """
  # Convenience wrapper over the general-purpose cast().
  return cast(x, dtype=dtypes.float64, name=name)
def to_int32(x, name="ToInt32"):
  """Casts a tensor to type `int32`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `int32`.

  Raises:
    TypeError: If `x` cannot be cast to the `int32`.
  """
  # Convenience wrapper over the general-purpose cast().
  return cast(x, dtype=dtypes.int32, name=name)
def to_int64(x, name="ToInt64"):
  """Casts a tensor to type `int64`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `int64`.

  Raises:
    TypeError: If `x` cannot be cast to the `int64`.
  """
  # Convenience wrapper over the general-purpose cast().
  return cast(x, dtype=dtypes.int64, name=name)
def to_bfloat16(x, name="ToBFloat16"):
  """Casts a tensor to type `bfloat16`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `bfloat16`.

  Raises:
    TypeError: If `x` cannot be cast to the `bfloat16`.
  """
  # Convenience wrapper over the general-purpose cast().
  return cast(x, dtype=dtypes.bfloat16, name=name)
# Hook the generated unary ops into Tensor's operator syntax.
ops.Tensor._override_operator("__neg__", gen_math_ops._neg)
ops.Tensor._override_operator("__abs__", abs)
# __invert__ corresponds to the ~ operator. Here we follow the numpy convention
# ~ marks an elementwise bit-wise inverse. This is only implemented for boolean
# tensors and will throw a TypeError if used on nonboolean arrays
ops.Tensor._override_operator("__invert__", gen_math_ops.logical_not)
def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):
  """Register operators with different tensor and scalar versions.

  If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices,
  sp_values, sp_shape, dense)` and outputs `(new_sp_values)`.

  Args:
    func: the operator
    op_name: name of the operator being overridden
    clazz_object: class to override for. Either `Tensor` or `SparseTensor`.
  """
  def binary_op_wrapper(x, y):
    # Dense forward form (`x <op> y`): coerce the right operand to a
    # tensor of x's base dtype before calling the op.
    with ops.name_scope(None, op_name, [x, y]) as name:
      if not isinstance(y, sparse_tensor.SparseTensor):
        y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
      return func(x, y, name=name)
  def binary_op_wrapper_sparse(sp_x, y):
    # Sparse left operand: func receives the decomposed SparseTensor and
    # returns new values, from which a SparseTensor is rebuilt.
    with ops.name_scope(None, op_name, [sp_x, y]) as name:
      y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y")
      return sparse_tensor.SparseTensor(
          sp_x.indices,
          func(
              sp_x.indices, sp_x.values, sp_x.dense_shape, y, name=name),
          sp_x.dense_shape)
  def r_binary_op_wrapper(y, x):
    # Reflected form (`scalar <op> tensor`): coerce the left operand to
    # a tensor of y's base dtype instead.
    with ops.name_scope(None, op_name, [x, y]) as name:
      x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
      return func(x, y, name=name)
  # Propagate func.__doc__ to the wrappers
  try:
    doc = func.__doc__
  except AttributeError:
    doc = None
  binary_op_wrapper.__doc__ = doc
  r_binary_op_wrapper.__doc__ = doc
  binary_op_wrapper_sparse.__doc__ = doc
  if clazz_object is ops.Tensor:
    # Dense tensors get both the forward and the reflected operator.
    clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper)
    del binary_op_wrapper
    clazz_object._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
    del r_binary_op_wrapper
  else:
    # SparseTensor only supports the forward (sparse-on-the-left) form.
    clazz_object._override_operator("__%s__" % op_name,
                                    binary_op_wrapper_sparse)
    del binary_op_wrapper_sparse
# Conversion table for __truediv__. None entries mean no conversion required.
# Integer dtypes map to a floating dtype (float32 for 8/16-bit ints, float64
# for 32/64-bit ints — see the `truediv` docstring below, which notes this
# matches NumPy's behavior); floating and complex dtypes pass through as-is.
_TRUEDIV_TABLE = {
    dtypes.uint8: dtypes.float32,
    dtypes.int8: dtypes.float32,
    dtypes.uint16: dtypes.float32,
    dtypes.int16: dtypes.float32,
    dtypes.int32: dtypes.float64,
    dtypes.int64: dtypes.float64,
    dtypes.float16: None,
    dtypes.float32: None,
    dtypes.float64: None,
    dtypes.complex64: None,
    dtypes.complex128: None,
}
# NOTE: the support of "sparse (true)div dense" is currently not baked in into
# "tf.(true_)div()". Until such an API decision is made, the supported usage is
# to explicitly use the "/" operator to invoke either truediv or div.
def _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None):
  """Internal helper function for 'sp_t / dense_t'."""
  with ops.name_scope(name, "truediv",
                      [sp_indices, sp_values, sp_shape, y]) as name:
    sp_values = ops.convert_to_tensor(sp_values, name="sp_values")
    y = ops.convert_to_tensor(y, name="y")
    lhs_dtype = sp_values.dtype.base_dtype
    rhs_dtype = y.dtype.base_dtype
    if lhs_dtype != rhs_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (lhs_dtype, rhs_dtype))
    try:
      promoted_dtype = _TRUEDIV_TABLE[lhs_dtype]
    except KeyError:
      raise TypeError("Invalid dtype %r in __truediv__" % lhs_dtype)
    # Integer inputs get promoted to a floating dtype before dividing.
    if promoted_dtype is not None:
      sp_values = cast(sp_values, promoted_dtype)
      y = cast(y, promoted_dtype)
    return gen_sparse_ops.sparse_dense_cwise_div(
        sp_indices, sp_values, sp_shape, y, name=name)
def _truediv_python3(x, y, name=None):
  """Implements Python-3-style true division for dense tensors.

  Integer operands are promoted to a floating dtype via `_TRUEDIV_TABLE`
  before the elementwise real division is performed.
  """
  with ops.name_scope(name, "truediv", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y")
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (x_dtype, y_dtype))
    try:
      promoted_dtype = _TRUEDIV_TABLE[x_dtype]
    except KeyError:
      raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
    if promoted_dtype is not None:
      x = cast(x, promoted_dtype)
      y = cast(y, promoted_dtype)
    return gen_math_ops._real_div(x, y, name=name)
def _div_python2(x, y, name=None):
  """Divide two values using Python 2 semantics. Used for Tensor.__div__.

  Floating-point and complex operands divide exactly; every other dtype
  uses flooring division.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` returns the quotient of x and y.
  """
  with ops.name_scope(name, "div", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (x_dtype, y_dtype))
    if x_dtype.is_floating or x_dtype.is_complex:
      return gen_math_ops._real_div(x, y, name=name)
    return gen_math_ops._floor_div(x, y, name=name)
def truediv(x, y, name=None):
  """Divides x / y elementwise (using Python 3 division operator semantics).

  NOTE: Prefer using the Tensor operator or tf.divide which obey Python
  division operator semantics.

  All integer arguments are first cast to a floating type, exactly as the
  `/` operator behaves in Python 3 (and in Python 2.7 with
  `from __future__ import division`). For integer division that rounds
  toward negative infinity, use `x // y` or `tf.floordiv` instead.

  `x` and `y` must share a numeric dtype. Floating-point inputs keep their
  dtype; integral inputs are promoted to `float32` (`int8`/`int16`) or
  `float64` (`int32`/`int64`), matching the behavior of NumPy.

  Args:
    x: `Tensor` numerator of numeric type.
    y: `Tensor` denominator of numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` evaluated in floating point.

  Raises:
    TypeError: If `x` and `y` have different dtypes.
  """
  return _truediv_python3(x, y, name)
def div(x, y, name=None):
  """Divides x / y elementwise (using Python 2 division operator semantics).

  NOTE: Prefer using the Tensor division operator or tf.divide which obey
  Python division operator semantics.

  Forces Python 2.7 division rules: a float operand yields a float result,
  while purely integral operands yield an integer result computed with
  flooring division.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` returns the quotient of x and y.
  """
  return _div_python2(x, y, name)
# TODO(aselle): This should be removed
# Module-level alias for the elementwise floor-modulo op.
mod = gen_math_ops._floor_mod
# TODO(aselle): Deprecate this once all internal functionality uses
# tf.truncatediv
def floordiv(x, y, name=None):
  """Divides `x / y` elementwise, rounding toward the most negative integer.

  Equivalent to `tf.div(x,y)` for integers, but applies
  `tf.floor(tf.div(x,y))` for floating-point arguments so the result is
  always an integer value (possibly represented as floating point). This op
  backs `x // y` floor division in Python 3, and in Python 2.7 with
  `from __future__ import division`.

  Note that for efficiency, `floordiv` uses C semantics for negative numbers
  (unlike Python and Numpy).

  `x` and `y` must have the same type; the result has that type as well.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` rounded down (except possibly towards zero for negative integers).

  Raises:
    TypeError: If the inputs are complex.
  """
  with ops.name_scope(name, "floordiv", [x, y]) as name:
    return gen_math_ops._floor_div(x, y, name=name)
# Public aliases for the generated elementwise division/modulo kernels.
realdiv = gen_math_ops._real_div
truncatediv = gen_math_ops._truncate_div
# TODO(aselle): Rename this to floordiv when we can.
floor_div = gen_math_ops._floor_div
truncatemod = gen_math_ops._truncate_mod
floormod = gen_math_ops._floor_mod
def _mul_dispatch(x, y, name=None):
  """Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse".

  Args:
    x: A dense `Tensor`.
    y: A `Tensor` or a `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` if `y` is dense; otherwise a `SparseTensor` with the same
    indices and dense_shape as `y` and elementwise-multiplied values.
  """
  is_tensor_y = isinstance(y, ops.Tensor)
  if is_tensor_y:
    return gen_math_ops._mul(x, y, name=name)
  else:
    assert isinstance(y, sparse_tensor.SparseTensor)  # Case: Dense * Sparse.
    # Pass `name` by keyword: the original passed it positionally, silently
    # depending on the generated op's parameter order after `dense`.
    new_vals = gen_sparse_ops.sparse_dense_cwise_mul(
        y.indices, y.values, y.dense_shape, x, name=name)
    return sparse_tensor.SparseTensor(y.indices, new_vals, y.dense_shape)
# NOTE(aselle): When integer division is added for sparse_dense_cwise,
# div, truediv, and floordiv should be delegated appropriately for
# Python semantics, analogous to dense cwise tensor operations.
# SparseTensor overloads: only div/truediv/mul have sparse-aware kernels.
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div",
                              sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv",
                              sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, "mul",
                              sparse_tensor.SparseTensor)
# Dense arithmetic operator overloads on Tensor.
_OverrideBinaryOperatorHelper(gen_math_ops.add, "add")
_OverrideBinaryOperatorHelper(gen_math_ops._sub, "sub")
_OverrideBinaryOperatorHelper(_mul_dispatch, "mul")
_OverrideBinaryOperatorHelper(_div_python2, "div")
_OverrideBinaryOperatorHelper(_truediv_python3, "truediv")
_OverrideBinaryOperatorHelper(floordiv, "floordiv")
# TODO(aselle): Switch mod to floor_mod when ready
# _OverrideBinaryOperatorHelper(gen_math_ops.floor_mod, "mod")
_OverrideBinaryOperatorHelper(gen_math_ops._floor_mod, "mod")
_OverrideBinaryOperatorHelper(pow, "pow")
def logical_xor(x, y, name="LogicalXor"):
  """x ^ y = (x | y) & ~(x & y)."""
  # TODO(alemi) Make this a cwise op if people end up relying on it.
  either = gen_math_ops.logical_or(x, y)
  both = gen_math_ops.logical_and(x, y)
  return gen_math_ops.logical_and(
      either, gen_math_ops.logical_not(both), name=name)
# Logical operator overloads (&, |, ^) on Tensor.
_OverrideBinaryOperatorHelper(gen_math_ops.logical_and, "and")
_OverrideBinaryOperatorHelper(gen_math_ops.logical_or, "or")
_OverrideBinaryOperatorHelper(logical_xor, "xor")
# Comparison operator overloads on Tensor.
ops.Tensor._override_operator("__lt__", gen_math_ops.less)
ops.Tensor._override_operator("__le__", gen_math_ops.less_equal)
ops.Tensor._override_operator("__gt__", gen_math_ops.greater)
ops.Tensor._override_operator("__ge__", gen_math_ops.greater_equal)
def range(start, limit=None, delta=1, dtype=None, name="range"):
  """Creates a sequence of numbers.

  The sequence begins at `start` and advances by `delta` up to, but not
  including, `limit`. The dtype of the result is inferred from the inputs
  unless given explicitly via `dtype`.

  Like the Python builtin `range`, `start` defaults to 0, so that
  `range(n) = range(0, n)`.

  For example:

  ```python
  # 'start' is 3
  # 'limit' is 18
  # 'delta' is 3
  tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
  # 'start' is 3
  # 'limit' is 1
  # 'delta' is -0.5
  tf.range(start, limit, delta) ==> [3, 2.5, 2, 1.5]
  # 'limit' is 5
  tf.range(limit) ==> [0, 1, 2, 3, 4]
  ```

  Args:
    start: A 0-D `Tensor` (scalar). Acts as first entry in the range if
      `limit` is not None; otherwise, acts as range limit and first entry
      defaults to 0.
    limit: A 0-D `Tensor` (scalar). Upper limit of sequence,
      exclusive. If None, defaults to the value of `start` while the first
      entry of the range defaults to 0.
    delta: A 0-D `Tensor` (scalar). Number that increments
      `start`. Defaults to 1.
    dtype: The type of the elements of the resulting tensor.
    name: A name for the operation. Defaults to "range".

  Returns:
    An 1-D `Tensor` of type `dtype`.

  @compatibility(numpy)
  Equivalent to np.arange
  @end_compatibility
  """
  if limit is None:
    # Single-argument form: range(n) means range(0, n).
    start, limit = 0, start
  with ops.name_scope(name, "Range", [start, limit, delta]) as name:
    start = ops.convert_to_tensor(start, dtype=dtype, name="start")
    limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit")
    delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta")
    if dtype is None:
      # Infer the result dtype as the "widest" argument dtype, ordered
      # int32 < int64 < float32 < float64.
      hierarchy = [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64]
      assert all(arg.dtype in hierarchy for arg in (start, limit, delta))
      inferred = max((arg.dtype for arg in (start, limit, delta)),
                     key=hierarchy.index)
      start = cast(start, inferred)
      limit = cast(limit, inferred)
      delta = cast(delta, inferred)
    return gen_math_ops._range(start, limit, delta, name=name)
# Reduction operations
def _ReductionDims(x, axis, reduction_indices):
"""Returns range(0, rank(x)) if reduction_indices is None."""
# TODO(aselle): Remove this after deprecation
if reduction_indices is not None:
if axis is not None:
raise ValueError("Can't specify both axis' and 'reduction_indices'.")
axis = reduction_indices
if axis is not None:
return axis
else:
# Fast path: avoid creating Rank and Range ops if ndims is known.
if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
return constant_op.constant(
np.arange(x.get_shape().ndims), dtype=dtypes.int32)
if (isinstance(x, sparse_tensor.SparseTensor) and
x.dense_shape.get_shape().is_fully_defined()):
rank = x.dense_shape.get_shape()[0].value # sparse.dense_shape is 1-D.
return constant_op.constant(np.arange(rank), dtype=dtypes.int32)
# Otherwise, we rely on Range and Rank to do the right thing at run-time.
return range(0, array_ops.rank(x))
def reduce_sum(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
  """Computes the sum of elements across dimensions of a tensor.

  `input_tensor` is reduced along the dimensions given in `axis`. Each
  reduced dimension is dropped from the result unless `keep_dims` is true,
  in which case it is retained with length 1. When `axis` has no entries,
  every dimension is reduced and a single-element tensor is returned.

  For example:

  ```python
  # 'x' is [[1, 1, 1]
  #         [1, 1, 1]]
  tf.reduce_sum(x) ==> 6
  tf.reduce_sum(x, 0) ==> [2, 2, 2]
  tf.reduce_sum(x, 1) ==> [3, 3]
  tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]]
  tf.reduce_sum(x, [0, 1]) ==> 6
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; `None` (the default) reduces them all.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.sum
  @end_compatibility
  """
  reduction_axes = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._sum(input_tensor, reduction_axes, keep_dims, name=name)
def count_nonzero(input_tensor,
                  axis=None,
                  keep_dims=False,
                  dtype=dtypes.int64,
                  name=None,
                  reduction_indices=None):
  """Computes number of nonzero elements across dimensions of a tensor.

  `input_tensor` is reduced along the dimensions given in `axis`. Each
  reduced dimension is dropped from the result unless `keep_dims` is true,
  in which case it is retained with length 1. When `axis` has no entries,
  every dimension is reduced and a single-element tensor is returned.

  **NOTE** Floating point comparison to zero is done by exact floating point
  equality check. Small values are **not** rounded to zero for purposes of
  the nonzero check.

  For example:

  ```python
  # 'x' is [[0, 1, 0]
  #         [1, 1, 0]]
  tf.count_nonzero(x) ==> 3
  tf.count_nonzero(x, 0) ==> [1, 2, 0]
  tf.count_nonzero(x, 1) ==> [1, 2]
  tf.count_nonzero(x, 1, keep_dims=True) ==> [[1], [2]]
  tf.count_nonzero(x, [0, 1]) ==> 3
  ```

  Args:
    input_tensor: The tensor to reduce. Should be of numeric type, or `bool`.
    axis: The dimensions to reduce; `None` (the default) reduces them all.
    keep_dims: If true, retains reduced dimensions with length 1.
    dtype: The output dtype; defaults to `tf.int64`.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor (number of nonzero values).
  """
  with ops.name_scope(name, "count_nonzero", [input_tensor]):
    input_tensor = ops.convert_to_tensor(input_tensor, name="input_tensor")
    zero = input_tensor.dtype.as_numpy_dtype()
    nonzero_mask = gen_math_ops.not_equal(input_tensor, zero)
    # int64 reduction happens on GPU
    nonzero_count = reduce_sum(
        to_int64(nonzero_mask),
        axis=axis,
        keep_dims=keep_dims,
        reduction_indices=reduction_indices)
    return cast(nonzero_count, dtype=dtype)
def reduce_mean(input_tensor,
                axis=None,
                keep_dims=False,
                name=None,
                reduction_indices=None):
  """Computes the mean of elements across dimensions of a tensor.

  `input_tensor` is reduced along the dimensions given in `axis`. Each
  reduced dimension is dropped from the result unless `keep_dims` is true,
  in which case it is retained with length 1. When `axis` has no entries,
  every dimension is reduced and a single-element tensor is returned.

  For example:

  ```python
  # 'x' is [[1., 1.]
  #         [2., 2.]]
  tf.reduce_mean(x) ==> 1.5
  tf.reduce_mean(x, 0) ==> [1.5, 1.5]
  tf.reduce_mean(x, 1) ==> [1., 2.]
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; `None` (the default) reduces them all.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.mean
  @end_compatibility
  """
  reduction_axes = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._mean(input_tensor, reduction_axes, keep_dims, name=name)
def reduce_prod(input_tensor,
                axis=None,
                keep_dims=False,
                name=None,
                reduction_indices=None):
  """Computes the product of elements across dimensions of a tensor.

  `input_tensor` is reduced along the dimensions given in `axis`. Each
  reduced dimension is dropped from the result unless `keep_dims` is true,
  in which case it is retained with length 1. When `axis` has no entries,
  every dimension is reduced and a single-element tensor is returned.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; `None` (the default) reduces them all.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.prod
  @end_compatibility
  """
  reduction_axes = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._prod(input_tensor, reduction_axes, keep_dims, name=name)
def reduce_min(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
  """Computes the minimum of elements across dimensions of a tensor.

  `input_tensor` is reduced along the dimensions given in `axis`. Each
  reduced dimension is dropped from the result unless `keep_dims` is true,
  in which case it is retained with length 1. When `axis` has no entries,
  every dimension is reduced and a single-element tensor is returned.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; `None` (the default) reduces them all.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.min
  @end_compatibility
  """
  reduction_axes = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._min(input_tensor, reduction_axes, keep_dims, name=name)
def reduce_max(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
  """Computes the maximum of elements across dimensions of a tensor.

  `input_tensor` is reduced along the dimensions given in `axis`. Each
  reduced dimension is dropped from the result unless `keep_dims` is true,
  in which case it is retained with length 1. When `axis` has no entries,
  every dimension is reduced and a single-element tensor is returned.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; `None` (the default) reduces them all.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.max
  @end_compatibility
  """
  reduction_axes = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._max(input_tensor, reduction_axes, keep_dims, name=name)
def reduce_all(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
  """Computes the "logical and" of elements across dimensions of a tensor.

  `input_tensor` is reduced along the dimensions given in `axis`. Each
  reduced dimension is dropped from the result unless `keep_dims` is true,
  in which case it is retained with length 1. When `axis` has no entries,
  every dimension is reduced and a single-element tensor is returned.

  For example:

  ```python
  # 'x' is [[True,  True]
  #         [False, False]]
  tf.reduce_all(x) ==> False
  tf.reduce_all(x, 0) ==> [False, False]
  tf.reduce_all(x, 1) ==> [True, False]
  ```

  Args:
    input_tensor: The boolean tensor to reduce.
    axis: The dimensions to reduce; `None` (the default) reduces them all.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.all
  @end_compatibility
  """
  reduction_axes = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._all(input_tensor, reduction_axes, keep_dims, name=name)
def reduce_any(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
  """Computes the "logical or" of elements across dimensions of a tensor.

  `input_tensor` is reduced along the dimensions given in `axis`. Each
  reduced dimension is dropped from the result unless `keep_dims` is true,
  in which case it is retained with length 1. When `axis` has no entries,
  every dimension is reduced and a single-element tensor is returned.

  For example:

  ```python
  # 'x' is [[True,  True]
  #         [False, False]]
  tf.reduce_any(x) ==> True
  tf.reduce_any(x, 0) ==> [True, True]
  tf.reduce_any(x, 1) ==> [True, False]
  ```

  Args:
    input_tensor: The boolean tensor to reduce.
    axis: The dimensions to reduce; `None` (the default) reduces them all.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.any
  @end_compatibility
  """
  reduction_axes = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._any(input_tensor, reduction_axes, keep_dims, name=name)
def reduce_logsumexp(input_tensor,
                     axis=None,
                     keep_dims=False,
                     name=None,
                     reduction_indices=None):
  """Computes log(sum(exp(elements across dimensions of a tensor))).

  `input_tensor` is reduced along the dimensions given in `axis`. Each
  reduced dimension is dropped from the result unless `keep_dims` is true,
  in which case it is retained with length 1. When `axis` has no entries,
  every dimension is reduced and a single-element tensor is returned.

  This function is more numerically stable than log(sum(exp(input))). It
  avoids overflows caused by taking the exp of large inputs and underflows
  caused by taking the log of small inputs.

  For example:

  ```python
  # 'x' is [[0, 0, 0]]
  #         [0, 0, 0]]
  tf.reduce_logsumexp(x) ==> log(6)
  tf.reduce_logsumexp(x, 0) ==> [log(2), log(2), log(2)]
  tf.reduce_logsumexp(x, 1) ==> [log(3), log(3)]
  tf.reduce_logsumexp(x, 1, keep_dims=True) ==> [[log(3)], [log(3)]]
  tf.reduce_logsumexp(x, [0, 1]) ==> log(6)
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; `None` (the default) reduces them all.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.
  """
  with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name:
    # Shift inputs by their (gradient-stopped) max so exp() cannot overflow;
    # the shift is added back after the log.
    my_max = array_ops.stop_gradient(
        reduce_max(
            input_tensor,
            axis=axis,
            reduction_indices=reduction_indices,
            keep_dims=True))
    summed = reduce_sum(
        gen_math_ops.exp(input_tensor - my_max),
        axis,
        keep_dims=True,
        reduction_indices=reduction_indices)
    result = gen_math_ops.log(summed) + my_max
    if not keep_dims:
      squeeze_axis = [axis] if isinstance(axis, int) else axis
      result = array_ops.squeeze(result, squeeze_axis)
    return result
def trace(x, name=None):
  """ Compute the trace of a tensor `x`.
  `trace(x)` returns the sum along the main diagonal of each inner-most matrix
  in x. If x is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then output
  is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where
  `output[i, j, k, ..., l] = trace(x[i, j, k, ..., l, :, :])`
  For example:
  ```python
  # 'x' is [[1, 2],
  #         [3, 4]]
  tf.trace(x) ==> 5
  # 'x' is [[1,2,3],
  #         [4,5,6],
  #         [7,8,9]]
  tf.trace(x) ==> 15
  # 'x' is [[[1,2,3],
  #          [4,5,6],
  #          [7,8,9]],
  #         [[-1,-2,-3],
  #          [-4,-5,-6],
  #          [-7,-8,-9]]]
  tf.trace(x) ==> [15,-15]
  ```
  Args:
    x: tensor.
    name: A name for the operation (optional).
  Returns:
    The trace of input tensor.
  """
  with ops.name_scope(name, "Trace", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    # Extract the batched main diagonal, then sum it over the last axis.
    return reduce_sum(array_ops.matrix_diag_part(x), [-1], name=name)
def matmul(a,
           b,
           transpose_a=False,
           transpose_b=False,
           adjoint_a=False,
           adjoint_b=False,
           a_is_sparse=False,
           b_is_sparse=False,
           name=None):
  """Multiplies matrix `a` by matrix `b`, producing `a` * `b`.
  The inputs must be matrices (or tensors of rank > 2, representing batches of
  matrices), with matching inner dimensions, possibly after transposition.
  Both matrices must be of the same type. The supported types are:
  `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.
  Either matrix can be transposed or adjointed (conjugated and transposed) on
  the fly by setting one of the corresponding flag to `True`. These are `False`
  by default.
  If one or both of the matrices contain a lot of zeros, a more efficient
  multiplication algorithm can be used by setting the corresponding
  `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
  This optimization is only available for plain matrices (rank-2 tensors) with
  datatypes `bfloat16` or `float32`.
  For example:
  ```python
  # 2-D tensor `a`
  a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]) => [[1. 2. 3.]
                                                        [4. 5. 6.]]
  # 2-D tensor `b`
  b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2]) => [[7. 8.]
                                                           [9. 10.]
                                                           [11. 12.]]
  c = tf.matmul(a, b) => [[58 64]
                          [139 154]]
  # 3-D tensor `a`
  a = tf.constant(np.arange(1, 13, dtype=np.int32),
                  shape=[2, 2, 3]) => [[[ 1.  2.  3.]
                                        [ 4.  5.  6.]],
                                       [[ 7.  8.  9.]
                                        [10. 11. 12.]]]
  # 3-D tensor `b`
  b = tf.constant(np.arange(13, 25, dtype=np.int32),
                  shape=[2, 3, 2]) => [[[13. 14.]
                                        [15. 16.]
                                        [17. 18.]],
                                       [[19. 20.]
                                        [21. 22.]
                                        [23. 24.]]]
  c = tf.matmul(a, b) => [[[ 94 100]
                           [229 244]],
                          [[508 532]
                           [697 730]]]
  ```
  Args:
    a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`,
      `complex128` and rank > 1.
    b: `Tensor` with same type and rank as `a`.
    transpose_a: If `True`, `a` is transposed before multiplication.
    transpose_b: If `True`, `b` is transposed before multiplication.
    adjoint_a: If `True`, `a` is conjugated and transposed before
      multiplication.
    adjoint_b: If `True`, `b` is conjugated and transposed before
      multiplication.
    a_is_sparse: If `True`, `a` is treated as a sparse matrix.
    b_is_sparse: If `True`, `b` is treated as a sparse matrix.
    name: Name for the operation (optional).
  Returns:
    A `Tensor` of the same type as `a` and `b` where each inner-most matrix is
    the product of the corresponding matrices in `a` and `b`, e.g. if all
    transpose or adjoint attributes are `False`:
    `output`[..., i, j] = sum_k (`a`[..., i, k] * `b`[..., k, j]),
    for all indices i, j.
    Note: This is matrix product, not element-wise product.
  Raises:
    ValueError: If transpose_a and adjoint_a, or transpose_b and adjoint_b
      are both set to True.
  """
  with ops.name_scope(name, "MatMul", [a, b]) as name:
    # transpose_x and adjoint_x are mutually exclusive per operand.
    if transpose_a and adjoint_a:
      raise ValueError("Only one of transpose_a and adjoint_a can be True.")
    if transpose_b and adjoint_b:
      raise ValueError("Only one of transpose_b and adjoint_b can be True.")
    a = ops.convert_to_tensor(a, name="a")
    b = ops.convert_to_tensor(b, name="b")
    a_shape = a.get_shape()
    b_shape = b.get_shape()
    # Rank > 2 (or unknown rank) on both sides, and no sparsity hints:
    # dispatch to the batched kernel.
    if (not a_is_sparse and not b_is_sparse) and (
        (a_shape.ndims is None or a_shape.ndims > 2) and
        (b_shape.ndims is None or b_shape.ndims > 2)):
      # BatchMatmul does not support transpose, so we conjugate the matrix and
      # use adjoint instead. Conj() is a noop for real matrices.
      if transpose_a:
        a = conj(a)
        adjoint_a = True
      if transpose_b:
        b = conj(b)
        adjoint_b = True
      return gen_math_ops._batch_mat_mul(
          a, b, adj_x=adjoint_a, adj_y=adjoint_b, name=name)
    # Neither matmul nor sparse_matmul support adjoint, so we conjugate
    # the matrix and use transpose instead. Conj() is a noop for real
    # matrices.
    if adjoint_a:
      a = conj(a)
      transpose_a = True
    if adjoint_b:
      b = conj(b)
      transpose_b = True
    # sparse_matmul only supports these two dtypes (see docstring above).
    sparse_matmul_types = [dtypes.bfloat16, dtypes.float32]
    use_sparse_matmul = (a.dtype in sparse_matmul_types and
                         b.dtype in sparse_matmul_types and
                         (a_is_sparse or b_is_sparse))
    if dtypes.bfloat16 in (a.dtype, b.dtype):
      # matmul currently doesn't handle bfloat16 inputs.
      use_sparse_matmul = True
    if use_sparse_matmul:
      return sparse_matmul(
          a,
          b,
          transpose_a=transpose_a,
          transpose_b=transpose_b,
          a_is_sparse=a_is_sparse,
          b_is_sparse=b_is_sparse,
          name=name)
    else:
      return gen_math_ops._mat_mul(
          a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)
# Public alias for the generated sparse-optimized matmul kernel; matmul()
# above dispatches here when a_is_sparse/b_is_sparse (or bfloat16) applies.
sparse_matmul = gen_math_ops._sparse_mat_mul
@ops.RegisterStatistics("MatMul", "flops")
def _calc_mat_mul_flops(graph, node):
  """Calculates the compute resources needed for MatMul."""
  transpose_a = node.attr["transpose_a"].b
  a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  a_shape.assert_is_fully_defined()
  # The contraction dimension of `a`: rows when transposed, columns otherwise.
  k = int(a_shape[0] if transpose_a else a_shape[1])
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  output_count = np.prod(output_shape.as_list())
  # Every output element costs k multiplies plus k adds.
  return ops.OpStats("flops", (k * output_count * 2))
def _as_indexed_slices(x, optimize=True):
  """Convert 'x' to IndexedSlices.

  Convert a dense Tensor to a block-sparse IndexedSlices.

  Args:
    x: Either a Tensor object, or an IndexedSlices object.
    optimize: if true, attempt to optimize the conversion of 'x'.

  Returns:
    An IndexedSlices object.

  Raises:
    TypeError: If 'x' is not a Tensor or an IndexedSlices object.
  """
  # TODO(touts): op_scope
  if isinstance(x, ops.IndexedSlices):
    # Already block-sparse; pass through untouched.
    return x
  if not isinstance(x, ops.Tensor):
    raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x))
  # Dense tensor: cover every row with an explicit index vector.
  x_shape = array_ops.shape_internal(x, optimize=optimize)
  return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)
def _as_indexed_slices_list(inputs, optimize=True):
"""Convert all elements of 'inputs' to IndexedSlices.
Additionally, homogenize the types of all the indices to
either int32 or int64.
Args:
inputs: List containing either Tensor or IndexedSlices objects.
optimize: if true, attempt to optimize the conversion of each input.
Returns:
A list of IndexedSlices objects.
Raises:
TypeError: If 'inputs' is not a list or a tuple.
"""
if not isinstance(inputs, (list, tuple)):
raise TypeError("Expected a list or tuple, not a %s" % type(inputs))
outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs]
with_int32_index = [
o.indices for o in outputs if o.indices.dtype == dtypes.int32
]
if not with_int32_index or len(with_int32_index) == len(outputs):
return outputs
casted_outputs = []
for o in outputs:
if o.indices.dtype == dtypes.int32:
casted_outputs.append(
ops.IndexedSlices(o.values,
cast(o.indices, dtypes.int64), o.dense_shape))
else:
casted_outputs.append(o)
return casted_outputs
def add_n(inputs, name=None):
  """Adds all input tensors element-wise.

  Args:
    inputs: A list of `Tensor` objects, each with same shape and type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
    cannot be inferred.
  """
  if not inputs or not isinstance(inputs, (list, tuple)):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
  if not all(isinstance(x, ops.Tensor) for x in inputs):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  if len(inputs) == 1:
    # Single input: nothing to add. Only materialize an Identity op when a
    # name was requested.
    return array_ops.identity(inputs[0], name=name) if name else inputs[0]
  return gen_math_ops._add_n(inputs, name=name)
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
  """Returns the element-wise sum of a list of tensors.

  Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
  otherwise, these are inferred.

  NOTE: This operation is not differentiable and cannot be used if inputs depend
  on trainable variables. Please use `tf.add_n` for such cases.

  For example:

  ```python
  # tensor 'a' is [[1, 2], [3, 4]]
  # tensor `b` is [[5, 0], [0, 6]]
  tf.accumulate_n([a, b, a]) ==> [[7, 4], [6, 14]]

  # Explicitly pass shape and type
  tf.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
    ==> [[7, 4], [6, 14]]
  ```

  Args:
    inputs: A list of `Tensor` objects, each with same shape and type.
    shape: Shape of elements of `inputs`.
    tensor_dtype: The type of `inputs`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
    cannot be inferred.
  """
  if not inputs or not isinstance(inputs, (list, tuple)):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
  if not all(isinstance(x, ops.Tensor) for x in inputs):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  if not all(x.dtype == inputs[0].dtype for x in inputs):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  if shape is not None:
    shape = tensor_shape.as_shape(shape)
  else:
    shape = tensor_shape.unknown_shape()
  for input_tensor in inputs:
    if isinstance(input_tensor, ops.Tensor):
      # Combine static shape information from every input.
      shape = shape.merge_with(input_tensor.get_shape())
  if len(inputs) == 1:
    # Nothing to accumulate for a single input.
    return inputs[0]
  if tensor_dtype is None:
    tensor_dtype = inputs[0].dtype
  with ops.name_scope(name, "AccumulateN", inputs) as name:
    # Accumulate into a zero-initialized temporary variable, then destroy it
    # once all assign_add updates have run.
    var = gen_state_ops._temporary_variable(
        shape=tensor_shape.vector(0), dtype=tensor_dtype)
    with ops.colocate_with(var):
      # NOTE(review): _merge appears to be used only to obtain some available
      # input tensor whose type/shape zeros_like can mirror — confirm.
      zeros = array_ops.zeros_like(gen_control_flow_ops._merge(inputs)[0])
      zeros.set_shape(shape)
      ref = state_ops.assign(var, zeros, validate_shape=False)
      update_ops = [
          state_ops.assign_add(
              ref, input_tensor, use_locking=True) for input_tensor in inputs
      ]
      # Only return after every accumulation update has executed.
      with ops.control_dependencies(update_ops):
        return gen_state_ops._destroy_temporary_variable(
            ref, var_name=var.op.name, name=name)
def sigmoid(x, name=None):
  """Computes sigmoid of `x` element-wise, i.e. `y = 1 / (1 + exp(-x))`.

  Args:
    x: A Tensor with type `float32`, `float64`, `int32`, `complex64`, `int64`,
      or `qint32`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x` if `x.dtype != qint32`
    otherwise the return type is `quint8`.

  @compatibility(numpy)
  Equivalent to np.scipy.special.expit
  @end_compatibility
  """
  with ops.name_scope(name, "Sigmoid", [x]) as name:
    x_tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops._sigmoid(x_tensor, name=name)
def tanh(x, name=None):
  """Computes hyperbolic tangent of `x` element-wise.

  Args:
    x: A Tensor or SparseTensor with type `float`, `double`, `int32`,
      `complex64`, `int64`, or `qint32`.
    name: A name for the operation (optional).

  Returns:
    A Tensor or SparseTensor respectively with the same type as `x` if
    `x.dtype != qint32` otherwise the return type is `quint8`.
  """
  with ops.name_scope(name, "Tanh", [x]) as name:
    # Sparse input: apply tanh to the values and rebuild the SparseTensor.
    if isinstance(x, sparse_tensor.SparseTensor):
      tanh_values = gen_math_ops._tanh(x.values, name=name)
      return sparse_tensor.SparseTensor(
          indices=x.indices, values=tanh_values, dense_shape=x.dense_shape)
    return gen_math_ops._tanh(x, name=name)
def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative sum of the tensor `x` along `axis`.

  By default the op is inclusive, so the first output element equals the
  first input element:

  ```prettyprint
  tf.cumsum([a, b, c]) ==> [a, a + b, a + b + c]
  ```

  With `exclusive=True` an exclusive cumsum is performed instead:

  ```prettyprint
  tf.cumsum([a, b, c], exclusive=True) ==> [0, a, a + b]
  ```

  With `reverse=True` the cumsum runs in the opposite direction (more
  efficient than combining with separate `tf.reverse` ops):

  ```prettyprint
  tf.cumsum([a, b, c], reverse=True) ==> [a + b + c, b + c, c]
  ```

  `reverse` and `exclusive` may be combined:

  ```prettyprint
  tf.cumsum([a, b, c], exclusive=True, reverse=True) ==> [b + c, c, 0]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
       `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
       `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0).
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumsum", [x]) as name:
    input_tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumsum(
        input_tensor, axis, exclusive=exclusive, reverse=reverse, name=name)
def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative product of the tensor `x` along `axis`.

  By default the op is inclusive, so the first output element equals the
  first input element:

  ```prettyprint
  tf.cumprod([a, b, c]) ==> [a, a * b, a * b * c]
  ```

  With `exclusive=True` an exclusive cumprod is performed instead:

  ```prettyprint
  tf.cumprod([a, b, c], exclusive=True) ==> [1, a, a * b]
  ```

  With `reverse=True` the cumprod runs in the opposite direction (more
  efficient than combining with separate `tf.reverse` ops):

  ```prettyprint
  tf.cumprod([a, b, c], reverse=True) ==> [a * b * c, b * c, c]
  ```

  `reverse` and `exclusive` may be combined:

  ```prettyprint
  tf.cumprod([a, b, c], exclusive=True, reverse=True) ==> [b * c, c, 1]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
       `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
       `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0).
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumprod", [x]) as name:
    input_tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumprod(
        input_tensor, axis, exclusive=exclusive, reverse=reverse, name=name)
def conj(x, name=None):
  r"""Returns the complex conjugate of a complex number.

  Given a tensor `input` of complex numbers, this operation returns a tensor of
  complex numbers that are the complex conjugate of each element in `input`. The
  complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
  real part and *b* is the imaginary part.

  The complex conjugate returned by this operation is of the form \\(a - bj\\).

  For example:

      # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
      tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]

  If `x` is real, it is returned unchanged.

  Args:
    x: `Tensor` to conjugate.  Must have numeric type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` that is the conjugate of `x` (with the same type).

  Raises:
    TypeError: If `x` is not a numeric tensor.
  """
  with ops.name_scope(name, "Conj", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    dtype = x.dtype
    # Real tensors are their own conjugate; no op is emitted for them.
    if dtype.is_floating or dtype.is_integer:
      return x
    if dtype.is_complex:
      return gen_math_ops._conj(x, name=name)
    raise TypeError("Expected numeric tensor, got dtype %r" % x.dtype)
def _BroadcastShape(op):
  """Common shape function for binary operators that broadcast their inputs."""
  shape_x = op.inputs[0].get_shape()
  shape_y = op.inputs[1].get_shape()
  return [common_shapes.broadcast_shape(shape_x, shape_y)]
def reduced_shape(input_shape, axes):
  """Helper function for reduction ops.

  Args:
    input_shape: 1-D Tensor, the shape of the Tensor being reduced.
    axes: 1-D Tensor, the reduction axes.

  Returns:
    A 1-D Tensor, the output shape as if keep_dims were set to True.
  """
  # Example:
  # cast needed for SparseTensor reductions
  input_shape = to_int32(input_shape)  # [2, 3, 5, 7]
  axes = to_int32(axes)  # [1, 2]
  input_rank = array_ops.size(input_shape)  # 4
  # Normalize negative axis indices into [0, input_rank).
  axes = (axes + input_rank) % input_rank
  axes_shape = array_ops.shape(axes)  # [2]
  # Stitch together: positions listed in `axes` receive 1, all other
  # positions keep their original size from `input_shape`.
  return gen_data_flow_ops.dynamic_stitch(  # [2, 1, 1, 7]
      [
          range(input_rank),  # [0, 1, 2, 3]
          axes
      ],  # [1, 2]
      [
          input_shape,  # [2, 3, 5, 7]
          array_ops.fill(axes_shape, 1)
      ])  # [1, 1]
def tensordot(a, b, axes, name=None):
  r"""Tensor contraction of a and b along specified axes.

  Tensordot (also known as tensor contraction) sums the product of elements
  from `a` and `b` over the indices specified by `a_axes` and `b_axes`.
  The lists `a_axes` and `b_axes` specify those pairs of axes along which to
  contract the tensors. The axis `a_axes[i]` of `a` must have the same dimension
  as axis `b_axes[i]` of `b` for all `i` in `range(0, len(a_axes))`. The lists
  `a_axes` and `b_axes` must have identical length and consist of unique
  integers that specify valid axes for each of the tensors.

  This operation corresponds to `numpy.tensordot(a, b, axes)`.

  Example 1: When `a` and `b` are matrices (order 2), the case `axes = 1`
  is equivalent to matrix multiplication.

  Example 2: When `a` and `b` are matrices (order 2), the case
  `axes = [[1], [0]]` is equivalent to matrix multiplication.

  Example 3: Suppose that \\(a_ijk\\) and \\(b_lmn\\) represent two
  tensors of order 3. Then, `contract(a, b, [0], [2])` is the order 4 tensor
  \\(c_{jklm}\\) whose entry
  corresponding to the indices \\((j,k,l,m)\\) is given by:

  \\( c_{jklm} = \sum_i a_{ijk} b_{lmi} \\).

  In general, `order(c) = order(a) + order(b) - 2*len(axes[0])`.

  Args:
    a: `Tensor` of type `float32` or `float64`.
    b: `Tensor` with the same type as `a`.
    axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k].
     If axes is a scalar, sum over the last N axes of a and the first N axes
     of b in order.
     If axes is a list or `Tensor` the first and second row contain the set of
     unique integers specifying axes along which the contraction is computed,
     for `a` and `b`, respectively. The number of axes for `a` and `b` must
     be equal.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `a`.

  Raises:
    ValueError: If the shapes of `a`, `b`, and `axes` are incompatible.
    IndexError: If the values in axes exceed the rank of the corresponding
      tensor.
  """

  def _tensordot_reshape(a, axes, flipped=False):
    """Helper method to perform transpose and reshape for contraction op.

    This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul`
    using `array_ops.transpose` and `array_ops.reshape`. The method takes a
    tensor and performs the correct transpose and reshape operation for a given
    set of indices. It returns the reshaped tensor as well as a list of indices
    necessary to reshape the tensor again after matrix multiplication.

    Args:
      a: `Tensor`.
      axes: List or `int32` `Tensor` of unique indices specifying valid axes of
       `a`.
      flipped: An optional `bool`. Defaults to `False`. If `True`, the method
        assumes that `a` is the second argument in the contraction operation.

    Returns:
      A pair `(reshaped_a, free_dims)` where `reshaped_a` is the tensor `a`
      reshaped to allow contraction via `matmul` and `free_dims` is either a
      list of integers or an `int32` `Tensor`, depending on if `axes` is a list
      and the shape of `a` is fully defined.
    """
    # TODO(b/33084409): Implement partial shape inference.
    if a.get_shape().is_fully_defined() and isinstance(axes, (list, tuple)):
      # Static path: all dimensions are known, so the permutation and the
      # collapsed two-dimensional shape are computed with plain Python ints.
      shape_a = a.get_shape().as_list()
      axes = [i if i >= 0 else i + len(shape_a) for i in axes]
      free = [i for i in xrange(len(shape_a)) if i not in axes]
      free_dims = [shape_a[i] for i in free]
      prod_free = int(np.prod([shape_a[i] for i in free]))
      prod_axes = int(np.prod([shape_a[i] for i in axes]))
      perm = list(axes) + free if flipped else free + list(axes)
      new_shape = [prod_axes, prod_free] if flipped else [prod_free, prod_axes]
      reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)
      return reshaped_a, free_dims
    else:
      # Dynamic path: the permutation and shapes are built as graph ops.
      shape_a = array_ops.shape(a)
      rank_a = array_ops.rank(a)
      axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
      # Normalize negative axis indices into [0, rank_a).
      axes = cast(axes >= 0, dtypes.int32) * axes + cast(
          axes < 0, dtypes.int32) * (axes + rank_a)
      free, _ = array_ops.setdiff1d(range(rank_a), axes)
      free_dims = array_ops.gather(shape_a, free)
      axes_dims = array_ops.gather(shape_a, axes)
      prod_free_dims = reduce_prod(free_dims)
      prod_axes_dims = reduce_prod(axes_dims)
      # NOTE: removed a dead `perm = array_ops.concat([axes_dims, free_dims],
      # 0)` assignment here.  It concatenated dimension *sizes* rather than
      # axis indices and was unconditionally overwritten by both branches
      # below.
      if flipped:
        perm = array_ops.concat([axes, free], 0)
        new_shape = array_ops.stack([prod_axes_dims, prod_free_dims])
      else:
        perm = array_ops.concat([free, axes], 0)
        new_shape = array_ops.stack([prod_free_dims, prod_axes_dims])
      reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)
      return reshaped_a, free_dims

  def _tensordot_axes(a, axes):
    """Generates two sets of contraction axes for the two tensor arguments."""
    a_shape = a.get_shape()
    if isinstance(axes, compat.integral_types):
      if axes < 1:
        raise ValueError("'axes' must be at least 1.")
      if a_shape.ndims is not None:
        # Contract the last `axes` axes of `a` with the first `axes` axes of
        # `b`.
        return range(a_shape.ndims - axes, a_shape.ndims), range(axes)
      else:
        rank = array_ops.rank(a)
        # Fixed: the second element must be the first `axes` axes of `b`
        # (range(axes)), mirroring the static-shape branch above; it
        # previously produced range(rank), i.e. every axis of `a`.
        return (array_ops.range(
            rank - axes, rank, dtype=dtypes.int32), array_ops.range(
                axes, dtype=dtypes.int32))
    elif isinstance(axes, (list, tuple)):
      if len(axes) != 2:
        raise ValueError("'axes' must be an integer or have length 2.")
      a_axes = axes[0]
      b_axes = axes[1]
      if len(a_axes) != len(b_axes):
        raise ValueError(
            "Different number of contraction axes 'a' and 'b', %s != %s.",
            len(a_axes), len(b_axes))
      return a_axes, b_axes
    else:
      axes = ops.convert_to_tensor(axes, name="axes", dtype=dtypes.int32)
      return axes[0], axes[1]

  with ops.name_scope(name, "Tensordot", [a, b, axes]) as name:
    a = ops.convert_to_tensor(a, name="a")
    b = ops.convert_to_tensor(b, name="b")
    a_axes, b_axes = _tensordot_axes(a, axes)
    a_reshape, a_free_dims = _tensordot_reshape(a, a_axes)
    b_reshape, b_free_dims = _tensordot_reshape(b, b_axes, True)
    # The contraction itself is a single matmul of the two collapsed matrices.
    ab_matmul = matmul(a_reshape, b_reshape)
    if isinstance(a_free_dims, list) and isinstance(b_free_dims, list):
      return array_ops.reshape(ab_matmul, a_free_dims + b_free_dims, name=name)
    else:
      a_free_dims = ops.convert_to_tensor(a_free_dims)
      b_free_dims = ops.convert_to_tensor(b_free_dims)
      return array_ops.reshape(
          ab_matmul, array_ops.concat([a_free_dims, b_free_dims], 0), name=name)
| {
"content_hash": "d90bdd61b1bb24978bb5ce19d6d22c62",
"timestamp": "",
"source": "github",
"line_count": 2289,
"max_line_length": 80,
"avg_line_length": 32.79641764962866,
"alnum_prop": 0.6382757656085573,
"repo_name": "yaroslavvb/tensorflow",
"id": "e1b52a4086dabfba81547a76bc0c9cdfea1dfffd",
"size": "75760",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/math_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7583"
},
{
"name": "C",
"bytes": "171999"
},
{
"name": "C++",
"bytes": "21262959"
},
{
"name": "CMake",
"bytes": "122876"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "775516"
},
{
"name": "HTML",
"bytes": "557007"
},
{
"name": "Java",
"bytes": "271894"
},
{
"name": "JavaScript",
"bytes": "13406"
},
{
"name": "Jupyter Notebook",
"bytes": "1833840"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "36990"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "194247"
},
{
"name": "Python",
"bytes": "17754663"
},
{
"name": "Shell",
"bytes": "320602"
},
{
"name": "TypeScript",
"bytes": "773493"
}
],
"symlink_target": ""
} |
"""Helper functions for stoichiometric consistency checks."""
import logging
from collections import defaultdict
import cobra
import numpy as np
import sympy
from numpy.linalg import svd
from optlang.symbolics import add
from pylru import lrudecorator
from six import iteritems, itervalues
from memote.support.helpers import find_biomass_reaction
__all__ = ("stoichiometry_matrix", "nullspace")
LOGGER = logging.getLogger(__name__)
def is_only_substrate(metabolite: cobra.Metabolite, reaction: cobra.Reaction) -> bool:
    """Determine if a metabolite is only a substrate of a reaction."""
    if reaction.reversibility:
        # A reversible reaction can both produce and consume the metabolite.
        return False
    consumed = reaction.get_coefficient(metabolite) < 0
    runs_forward = reaction.lower_bound >= 0.0 and reaction.upper_bound > 0
    runs_backward = reaction.lower_bound < 0.0 and reaction.upper_bound <= 0
    return runs_forward if consumed else runs_backward
def is_only_product(metabolite: cobra.Metabolite, reaction: cobra.Reaction) -> bool:
    """Determine if a metabolite is only a product of a reaction."""
    if reaction.reversibility:
        # A reversible reaction can both produce and consume the metabolite.
        return False
    produced = reaction.get_coefficient(metabolite) > 0
    runs_forward = reaction.lower_bound >= 0.0 and reaction.upper_bound > 0
    runs_backward = reaction.lower_bound < 0.0 and reaction.upper_bound <= 0
    return runs_forward if produced else runs_backward
def add_reaction_constraints(model, reactions, Constraint):
    """
    Add the stoichiometric coefficients as constraints.

    Parameters
    ----------
    model : optlang.Model
        The transposed stoichiometric matrix representation.
    reactions : iterable
        Container of `cobra.Reaction` instances.
    Constraint : optlang.Constraint
        The constraint class for the specific interface.

    """
    new_constraints = []
    for reaction in reactions:
        # Each reaction contributes one equality constraint over the
        # metabolite mass variables, weighted by its stoichiometry.
        terms = add(
            [coef * model.variables[met.id]
             for met, coef in reaction.metabolites.items()]
        )
        new_constraints.append(Constraint(terms, lb=0, ub=0, name=reaction.id))
    model.add(new_constraints)
def stoichiometry_matrix(metabolites, reactions):
"""
Return the stoichiometry matrix representation of a set of reactions.
The reactions and metabolites order is respected. All metabolites are
expected to be contained and complete in terms of the reactions.
Parameters
----------
reactions : iterable
A somehow ordered list of unique reactions.
metabolites : iterable
A somehow ordered list of unique metabolites.
Returns
-------
numpy.array
The 2D array that represents the stoichiometry matrix.
dict
A dictionary mapping metabolites to row indexes.
dict
A dictionary mapping reactions to column indexes.
"""
matrix = np.zeros((len(metabolites), len(reactions)))
met_index = dict((met, i) for i, met in enumerate(metabolites))
rxn_index = dict()
for i, rxn in enumerate(reactions):
rxn_index[rxn] = i
for met, coef in iteritems(rxn.metabolites):
j = met_index[met]
matrix[j, i] = coef
return matrix, met_index, rxn_index
def rank(matrix, atol=1e-13, rtol=0):
    """
    Estimate the rank, i.e., the dimension of the column space, of a matrix.

    The rank is taken to be the number of singular values of the matrix that
    exceed a combined absolute/relative tolerance.

    Parameters
    ----------
    matrix : ndarray
        The matrix should be at most 2-D. A 1-D array with length k
        will be treated as a 2-D with shape (1, k)
    atol : float
        The absolute tolerance for a zero singular value.
    rtol : float
        The relative tolerance for a zero singular value, taken relative to
        the largest singular value.

    Notes
    -----
    If both `atol` and `rtol` are positive, the combined tolerance is the
    maximum of the two; that is::

        tol = max(atol, rtol * smax)

    Singular values smaller than ``tol`` are considered to be zero.

    Returns
    -------
    int
        The estimated rank of the matrix.

    See Also
    --------
    numpy.linalg.matrix_rank
        matrix_rank is basically the same as this function, but it does not
        provide the option of the absolute tolerance.

    """
    matrix = np.atleast_2d(matrix)
    singular_values = svd(matrix, compute_uv=False)
    threshold = max(atol, rtol * singular_values[0])
    return int(np.count_nonzero(singular_values >= threshold))
def nullspace(matrix, atol=1e-13, rtol=0.0):
    """
    Compute an approximate basis for the null space (kernel) of a matrix.

    The basis vectors are the right singular vectors belonging to singular
    values that fall below a combined absolute/relative tolerance.

    Parameters
    ----------
    matrix : ndarray
        The matrix should be at most 2-D. A 1-D array with length k
        will be treated as a 2-D with shape (1, k)
    atol : float
        The absolute tolerance for a zero singular value.
    rtol : float
        The relative tolerance for a zero singular value, taken relative to
        the largest singular value.

    Notes
    -----
    If both `atol` and `rtol` are positive, the combined tolerance is the
    maximum of the two; that is::

        tol = max(atol, rtol * smax)

    Singular values smaller than ``tol`` are considered to be zero.

    Returns
    -------
    ndarray
        If ``matrix`` is an array with shape (m, k), then the returned
        nullspace will be an array with shape ``(k, n)``, where n is the
        estimated dimension of the nullspace.

    References
    ----------
    Adapted from:
    https://scipy.github.io/old-wiki/pages/Cookbook/RankNullspace.html

    """  # noqa: D402
    matrix = np.atleast_2d(matrix)
    _, singular_values, vh = svd(matrix)
    threshold = max(atol, rtol * singular_values[0])
    num_significant = int((singular_values >= threshold).sum())
    # Rows of vh past the significant singular values span the kernel.
    return vh[num_significant:].conj().T
@lrudecorator(size=2)
def get_interface(model):
    """
    Return the interface specific classes.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    """
    interface = model.solver.interface
    return (
        interface.Model,
        interface.Constraint,
        interface.Variable,
        interface.Objective,
    )
@lrudecorator(size=2)
def get_internals(model):
    """
    Return non-boundary reactions and their metabolites.

    Boundary reactions are unbalanced by their nature. They are excluded here
    and only the metabolites of the others are considered.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    """
    biomass = set(find_biomass_reaction(model))
    if not biomass:
        LOGGER.warning(
            "No biomass reaction detected. Consistency test results "
            "are unreliable if one exists."
        )
    # Exchange/demand/sink reactions and biomass are excluded by design.
    return set(model.reactions) - (set(model.boundary) | biomass)
def create_milp_problem(kernel, metabolites, Model, Variable, Constraint, Objective):
    """
    Create the MILP as defined by equation (13) in [1]_.

    Parameters
    ----------
    kernel : numpy.array
        A 2-dimensional array that represents the left nullspace of the
        stoichiometric matrix which is the nullspace of the transpose of the
        stoichiometric matrix.
    metabolites : iterable
        The metabolites in the nullspace. The length of this vector must equal
        the first dimension of the nullspace.
    Model : optlang.Model
        Model class for a specific optlang interface.
    Variable : optlang.Variable
        Variable class for a specific optlang interface.
    Constraint : optlang.Constraint
        Constraint class for a specific optlang interface.
    Objective : optlang.Objective
        Objective class for a specific optlang interface.

    References
    ----------
    .. [1] Gevorgyan, A., M. G Poolman, and D. A Fell.
           "Detection of Stoichiometric Inconsistencies in Biomolecular
           Models."
           Bioinformatics 24, no. 19 (2008): 2245.

    """
    assert (
        len(metabolites) == kernel.shape[0]
    ), "metabolite vector and first nullspace dimension must be equal"
    ns_problem = Model()
    k_vars = list()
    for met in metabolites:
        # The element y[i] of the mass vector.
        y_var = Variable(met.id)
        # Binary indicator variable k[i] paired with y[i].
        k_var = Variable("k_{}".format(met.id), type="binary")
        k_vars.append(k_var)
        ns_problem.add([y_var, k_var])
        # These following constraints are equivalent to 0 <= y[i] <= k[i].
        ns_problem.add(Constraint(y_var - k_var, ub=0, name="switch_{}".format(met.id)))
        ns_problem.add(Constraint(y_var, lb=0, name="switch2_{}".format(met.id)))
    # Commit the added variables so they can be referenced below.
    ns_problem.update()
    # add nullspace constraints
    for (j, column) in enumerate(kernel.T):
        # Zero coefficients are skipped to keep the expressions sparse.
        expression = sympy.Add(
            *[
                coef * ns_problem.variables[met.id]
                for (met, coef) in zip(metabolites, column)
                if coef != 0.0
            ]
        )
        constraint = Constraint(expression, lb=0, ub=0, name="ns_{}".format(j))
        ns_problem.add(constraint)
    # The objective is to minimize the binary indicators k[i], subject to
    # the above inequality constraints.
    ns_problem.objective = Objective(1)
    ns_problem.objective.set_linear_coefficients({k_var: 1.0 for k_var in k_vars})
    ns_problem.objective.direction = "min"
    return ns_problem, k_vars
def add_cut(problem, indicators, bound, Constraint):
    """
    Add an integer cut to the problem.

    Ensure that the same solution involving these indicator variables cannot be
    found by enforcing their sum to be less than before.

    Parameters
    ----------
    problem : optlang.Model
        Specific optlang interface Model instance.
    indicators : iterable
        Binary indicator `optlang.Variable`s.
    bound : int
        Should be one less than the sum of indicators. Corresponds to P - 1 in
        equation (14) in [1]_.
    Constraint : optlang.Constraint
        Constraint class for a specific optlang interface.

    References
    ----------
    .. [1] Gevorgyan, A., M. G Poolman, and D. A Fell.
           "Detection of Stoichiometric Inconsistencies in Biomolecular
           Models."
           Bioinformatics 24, no. 19 (2008): 2245.

    """
    integer_cut = Constraint(sympy.Add(*indicators), ub=bound)
    problem.add(integer_cut)
    return integer_cut
def is_mass_balanced(reaction):
"""Confirm that a reaction is mass balanced."""
balance = defaultdict(int)
for metabolite, coefficient in iteritems(reaction.metabolites):
if metabolite.elements is None or len(metabolite.elements) == 0:
return False
for element, amount in iteritems(metabolite.elements):
balance[element] += coefficient * amount
return all(amount == 0 for amount in itervalues(balance))
def is_charge_balanced(reaction):
"""Confirm that a reaction is charge balanced."""
charge = 0
for metabolite, coefficient in iteritems(reaction.metabolites):
if metabolite.charge is None:
return False
charge += coefficient * metabolite.charge
return charge == 0
| {
"content_hash": "276194c6f1a07c92ff3301031c245327",
"timestamp": "",
"source": "github",
"line_count": 349,
"max_line_length": 88,
"avg_line_length": 32.43553008595988,
"alnum_prop": 0.6487632508833923,
"repo_name": "opencobra/memote",
"id": "c8f29da44867f237ca77ebb474c45281b7388a4f",
"size": "11998",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/memote/support/consistency_helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2979"
},
{
"name": "HTML",
"bytes": "3600838"
},
{
"name": "JavaScript",
"bytes": "2308"
},
{
"name": "Makefile",
"bytes": "2411"
},
{
"name": "Python",
"bytes": "632966"
},
{
"name": "Shell",
"bytes": "627"
},
{
"name": "TypeScript",
"bytes": "45177"
}
],
"symlink_target": ""
} |
from collections import namedtuple
from ortools.constraint_solver import pywrapcp
VEHICLE_COUNT = 30
VEHICLE_CAPACITY = 200

Customer = namedtuple("Customer", ['index', 'demand', 'x', 'y'])

print('Init')
# Depot (index 0) plus two customers; each Customer.index must be unique.
customers = list()
customers.append(Customer(0, 0, 0, 0))
customers.append(Customer(1, 1, 1.0, 1.0))
# Fixed: this customer previously duplicated index 1.
customers.append(Customer(2, 1, 2.0, 2.0))
customer_count = len(customers)

# 3 nodes, VEHICLE_COUNT vehicles, depot at node 0.
manager = pywrapcp.RoutingIndexManager(3, VEHICLE_COUNT, 0)
routing = pywrapcp.RoutingModel(manager)

print('Demand Constraint')
demands = []
for i in range(0, customer_count):
    demands.append(customers[i][1])  # field 1 is `demand`
routing.AddVectorDimension(demands, VEHICLE_CAPACITY, True, "Demand")

print('Adding Costs')
def distance_callback(from_index, to_index):
    """Arc-cost callback; constant 1 just for the sake of the example."""
    return 1
# Register the arc cost and solve with default parameters.
transit_callback_index = routing.RegisterTransitCallback(distance_callback)
routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
routing.CloseModel()
assignment = routing.Solve(None)
# Inspect solution and extract routes
routes = []
for i in range(0, routing.vehicles()):
    route_number = i
    routes.append([])
    node = routing.Start(route_number)
    route = []
    route.append(0)  # every route starts at the depot (node 0)
    if routing.IsVehicleUsed(assignment, i):
        # Walk the NextVar chain until the route's end sentinel is reached.
        while True:
            node = assignment.Value(routing.NextVar(node))
            if not routing.IsEnd(node):
                route.append(int(node))
            else:
                break
    route.append(0)  # and ends back at the depot
    routes[route_number].append(route)
# This are the routes as list of lists
routes = [el[0] for el in routes]
# Now try to read the routes into a new assigment object fails
assignment2 = routing.ReadAssignmentFromRoutes(routes, True)
| {
"content_hash": "2ddc64ad7ba008d711e0ed9be42a0753",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 75,
"avg_line_length": 26.59375,
"alnum_prop": 0.7050528789659224,
"repo_name": "google/or-tools",
"id": "68ec9d59a486c96628bcc253b5554ec6453bf61b",
"size": "1725",
"binary": false,
"copies": "2",
"ref": "refs/heads/stable",
"path": "examples/tests/issue117.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "18599"
},
{
"name": "C",
"bytes": "11382"
},
{
"name": "C#",
"bytes": "498888"
},
{
"name": "C++",
"bytes": "14071164"
},
{
"name": "CMake",
"bytes": "219723"
},
{
"name": "Dockerfile",
"bytes": "149476"
},
{
"name": "Java",
"bytes": "459136"
},
{
"name": "Lex",
"bytes": "2271"
},
{
"name": "Makefile",
"bytes": "207007"
},
{
"name": "Python",
"bytes": "629275"
},
{
"name": "SWIG",
"bytes": "414259"
},
{
"name": "Shell",
"bytes": "83555"
},
{
"name": "Starlark",
"bytes": "235950"
},
{
"name": "Yacc",
"bytes": "26027"
},
{
"name": "sed",
"bytes": "45"
}
],
"symlink_target": ""
} |
import boto.sqs
import boto.sqs.queue
import argparse
from boto.sqs.message import Message
from boto.sqs.connection import SQSConnection
from boto.exception import SQSError
import sys
sys.path.append('/data')
from keys import access_key_id, secret_access_key
parser = argparse.ArgumentParser()
parser.add_argument("qname")
args = parser.parse_args()
conn = boto.sqs.connect_to_region("eu-west-1", aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key)
try:
q=conn.create_queue(args.qname)
print args.qname, " queue has been created or already exists"
except:
print "Could not create queue. possible too soon since deletion, wait 60 seconds"
| {
"content_hash": "538a5fb3f223123bdcfb5e9671d96565",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 120,
"avg_line_length": 29.043478260869566,
"alnum_prop": 0.7784431137724551,
"repo_name": "kjw0106/aws",
"id": "d0d1fc1839576248987216f3db21249551f9b113",
"size": "735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sqs_application/create-aws-queue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10560"
}
],
"symlink_target": ""
} |
""" Defines the scraped items dictionary model of items = {}. """
# -*- coding: utf-8 -*-
import scrapy
class CraigslistRental(scrapy.Item):
    """Item holding one scraped Craigslist rental listing."""

    # Dictionary keys created for scraping in craigslist.py
    cl_id = scrapy.Field()
    price = scrapy.Field()
    attributes = scrapy.Field()
    housing = scrapy.Field()
    neighborhood = scrapy.Field()
    date = scrapy.Field()
    location = scrapy.Field()
    # Additional dictionary keys created upon data cleanse in pipelines.py
    bedrooms = scrapy.Field()
    bathrooms = scrapy.Field()
    sqft = scrapy.Field()
    latitude = scrapy.Field()
    longitude = scrapy.Field()
    latlng = scrapy.Field()
| {
"content_hash": "0f5788a62142b71d90e0b18d39203029",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 74,
"avg_line_length": 26.52,
"alnum_prop": 0.6621417797888386,
"repo_name": "jttyeung/investable",
"id": "23a1fe873ce069b7dbcfacfbe1a03de9884bd901",
"size": "663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rent_scraper/rent_scraper/items.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5739"
},
{
"name": "HTML",
"bytes": "19548"
},
{
"name": "JavaScript",
"bytes": "17584"
},
{
"name": "Python",
"bytes": "49254"
}
],
"symlink_target": ""
} |
from flask_restful import Resource, reqparse, fields, marshal_with
from dateutil import parser
from flask_jwt_extended import jwt_required
from .revisions import revision_fields
from ... import db
# Marshaling schema used with flask_restful's marshal_with for a single
# Futuremark 3DMark06 benchmark result.
futuremark3dmark06result_fields = {
    'id': fields.Integer,
    'result_date': fields.DateTime(dt_format='iso8601'),
    # Sub-scores of the run (integers, may be absent).
    'sm2_score': fields.Integer(default=None),
    'cpu_score': fields.Integer(default=None),
    'sm3_score': fields.Integer(default=None),
    # Per-test frame rates, rendered with two decimal places.
    'proxcyon_fps': fields.Fixed(decimals=2, default=None),
    'fireflyforest_fps': fields.Fixed(decimals=2, default=None),
    'cpu1_fps': fields.Fixed(decimals=2, default=None),
    'cpu2_fps': fields.Fixed(decimals=2, default=None),
    'canyonflight_fps': fields.Fixed(decimals=2, default=None),
    'deepfreeze_fps': fields.Fixed(decimals=2, default=None),
    'overall_score': fields.Integer(default=None),
    'result_url': fields.String(default=None),
    # Embedded hardware/software revision the result was recorded against.
    'revision': fields.Nested(revision_fields),
    # Absolute self link for the resource.
    'uri': fields.Url('.futuremark3dmark06result', absolute=True)
}
from ...models import Revision, Futuremark3DMark06Result
class Futuremark3DMark06ResultListAPI(Resource):
    """Read-only collection endpoint listing every 3DMark06 result."""

    @marshal_with(futuremark3dmark06result_fields,
                  envelope='futuremark3dmark06results')
    def get(self):
        """Return all stored results, highest overall score first."""
        ranking = Futuremark3DMark06Result.overall_score.desc()
        return Futuremark3DMark06Result.query.order_by(ranking).all()
class Futuremark3DMark06ResultAPI(Resource):
    """Single-result endpoint: fetch, update or delete one 3DMark06 result."""

    def __init__(self):
        # Parser for PUT payloads.  Scores are integers and the per-test
        # frame rates are decimals (marshalled with fields.Fixed above), so
        # parse them as int/float rather than the previous str/int mix --
        # type=int on an fps value silently truncated e.g. 32.75 to 32.
        self.reqparse = reqparse.RequestParser()
        self.reqparse.add_argument('result_date', type=str, location='json')
        self.reqparse.add_argument('sm2_score', type=int, location='json')
        self.reqparse.add_argument('cpu_score', type=int, location='json')
        self.reqparse.add_argument('sm3_score', type=int, location='json')
        self.reqparse.add_argument('proxcyon_fps', type=float,
                                   location='json')
        self.reqparse.add_argument('fireflyforest_fps', type=float,
                                   location='json')
        self.reqparse.add_argument('cpu1_fps', type=float, location='json')
        self.reqparse.add_argument('cpu2_fps', type=float, location='json')
        self.reqparse.add_argument('canyonflight_fps', type=float,
                                   location='json')
        self.reqparse.add_argument('deepfreeze_fps', type=float,
                                   location='json')
        self.reqparse.add_argument('overall_score', type=int, location='json')
        self.reqparse.add_argument('result_url', type=str, location='json')
        super(Futuremark3DMark06ResultAPI, self).__init__()

    @marshal_with(futuremark3dmark06result_fields,
                  envelope='futuremark3dmark06result')
    def get(self, id):
        """Return the result with the given id, or 404 if absent."""
        return Futuremark3DMark06Result.query.get_or_404(id)

    @jwt_required
    @marshal_with(futuremark3dmark06result_fields,
                  envelope='futuremark3dmark06result')
    def put(self, id):
        """Partially update a result.

        Only keys present (non-null) in the JSON body are written;
        ``result_date`` strings are parsed into datetime objects.
        """
        futuremark3dmark06result = Futuremark3DMark06Result.query.get_or_404(
            id)
        args = self.reqparse.parse_args()
        for k, v in args.items():
            if v is not None:
                # result_date arrives as a date string; everything else can
                # be assigned to the model directly.
                if k == 'result_date':
                    setattr(futuremark3dmark06result, k, parser.parse(v))
                else:
                    setattr(futuremark3dmark06result, k, v)
        db.session.commit()
        return futuremark3dmark06result

    @jwt_required
    def delete(self, id):
        """Delete the result with the given id (no-op if already gone)."""
        Futuremark3DMark06Result.query\
            .filter(Futuremark3DMark06Result.id == id).delete()
        db.session.commit()
        return {'result': True}
class RevisionFuturemark3DMark06ResultListAPI(Resource):
    """Per-revision collection endpoint: list or create 3DMark06 results
    belonging to a machine revision."""

    def __init__(self):
        # Parser for POST payloads.  Scores parse as int and frame rates as
        # float to match the marshalled model fields -- the previous
        # str/int mix truncated fractional fps values.
        self.reqparse = reqparse.RequestParser()
        self.reqparse.add_argument('result_date', type=str, location='json')
        self.reqparse.add_argument('sm2_score', type=int, location='json')
        self.reqparse.add_argument('cpu_score', type=int, location='json')
        self.reqparse.add_argument('sm3_score', type=int, location='json')
        self.reqparse.add_argument('proxcyon_fps', type=float,
                                   location='json')
        self.reqparse.add_argument('fireflyforest_fps', type=float,
                                   location='json')
        self.reqparse.add_argument('cpu1_fps', type=float, location='json')
        self.reqparse.add_argument('cpu2_fps', type=float, location='json')
        self.reqparse.add_argument('canyonflight_fps', type=float,
                                   location='json')
        self.reqparse.add_argument('deepfreeze_fps', type=float,
                                   location='json')
        self.reqparse.add_argument('overall_score', type=int, location='json')
        self.reqparse.add_argument('result_url', type=str, location='json')
        super(RevisionFuturemark3DMark06ResultListAPI, self).__init__()

    @marshal_with(futuremark3dmark06result_fields,
                  envelope='futuremark3dmark06results')
    def get(self, id):
        """Return every result attached to revision ``id`` (404 if the
        revision does not exist)."""
        revision = Revision.query.get_or_404(id)
        return revision.futuremark3dmark06results.all()

    @jwt_required
    @marshal_with(futuremark3dmark06result_fields,
                  envelope='futuremark3dmark06result')
    def post(self, id):
        """Create a new result under revision ``id``; returns it with 201."""
        args = self.reqparse.parse_args()
        # result_date is optional; parse the string into a datetime if given.
        rd = None
        if args['result_date'] is not None:
            rd = parser.parse(args['result_date'])
        revision = Revision.query.get_or_404(id)
        futuremark3dmark06result = Futuremark3DMark06Result(
            result_date=rd,
            sm2_score=args['sm2_score'],
            cpu_score=args['cpu_score'],
            sm3_score=args['sm3_score'],
            proxcyon_fps=args['proxcyon_fps'],
            fireflyforest_fps=args['fireflyforest_fps'],
            cpu1_fps=args['cpu1_fps'],
            cpu2_fps=args['cpu2_fps'],
            canyonflight_fps=args['canyonflight_fps'],
            deepfreeze_fps=args['deepfreeze_fps'],
            overall_score=args['overall_score'],
            result_url=args['result_url'])
        futuremark3dmark06result.revision_id = revision.id
        db.session.add(futuremark3dmark06result)
        db.session.commit()
        return futuremark3dmark06result, 201
| {
"content_hash": "70452858eeb55db9851093cb0c6b0f39",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 79,
"avg_line_length": 43.86619718309859,
"alnum_prop": 0.6442446620645368,
"repo_name": "rivalrockets/rivalrockets-api",
"id": "2ac5f9e6276acd482b05bf9629d570969616cc69",
"size": "6229",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/api_1_0/resources/futuremark3dmark06results.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "65375"
}
],
"symlink_target": ""
} |
import json
import click
from tabulate import tabulate
@click.command('alerts', short_help='List environments, services, groups and tags')
@click.option('--environments', '-E', is_flag=True, help='List alert environments.')
@click.option('--services', '-S', is_flag=True, help='List alert services.')
@click.option('--groups', '-g', is_flag=True, help='List alert groups.')
@click.option('--tags', '-T', is_flag=True, help='List alert tags.')
@click.pass_obj
def cli(obj, environments, services, groups, tags):
    """List alert environments, services, groups and tags."""
    client = obj['client']
    # Columns shared by every attribute listing; each attribute may add one
    # extra labelled column of its own.
    base_headers = {'environment': 'ENVIRONMENT', 'count': 'COUNT',
                    'severityCounts': 'SEVERITY COUNTS',
                    'statusCounts': 'STATUS COUNTS'}
    # (flag, API endpoint, JSON response key, tabular fetcher, extra column).
    # Order matters: it preserves the original flag priority E > S > g > T.
    attributes = [
        (environments, '/environments', 'environments',
         client.get_environments, None),
        (services, '/services', 'services', client.get_services, 'service'),
        (groups, '/alerts/groups', 'groups', client.get_groups, 'group'),
        (tags, '/alerts/tags', 'tags', client.get_tags, 'tag'),
    ]
    for selected, endpoint, key, fetch, extra in attributes:
        if not selected:
            continue
        if obj['output'] == 'json':
            r = client.http.get(endpoint)
            click.echo(json.dumps(r[key], sort_keys=True, indent=4,
                                  ensure_ascii=False))
        else:
            headers = dict(base_headers)
            if extra:
                headers[extra] = extra.upper()
            click.echo(tabulate(fetch(), headers=headers,
                                tablefmt=obj['output']))
        return
    raise click.UsageError('Must choose an alert attribute to list.')
| {
"content_hash": "bab6d875479d6d76ce6f0ec9b58a7bdd",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 162,
"avg_line_length": 54.02127659574468,
"alnum_prop": 0.6266246553761323,
"repo_name": "alerta/python-alerta-client",
"id": "7041b2b1186cd40cba9e4e628698371e8c4e8754",
"size": "2539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alertaclient/commands/cmd_alerts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "253"
},
{
"name": "Makefile",
"bytes": "2622"
},
{
"name": "Python",
"bytes": "181621"
},
{
"name": "Shell",
"bytes": "5273"
}
],
"symlink_target": ""
} |
"""PPP module answering questions about integer sequences."""
from ppp_libmodule import HttpRequestHandler
from .requesthandler import RequestHandler
def app(environ, start_response):
    """WSGI entry point: hand the request to this module's RequestHandler."""
    handler = HttpRequestHandler(environ, start_response, RequestHandler)
    return handler.dispatch()
| {
"content_hash": "9fe2147b955b1e82fb113344a36ca6c1",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 72,
"avg_line_length": 36.55555555555556,
"alnum_prop": 0.7507598784194529,
"repo_name": "ProjetPP/PPP-OEIS",
"id": "5f718f330a3fa57374d57a577f4a5d1e05c50b34",
"size": "329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ppp_oeis/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "219"
},
{
"name": "Python",
"bytes": "11335"
},
{
"name": "Shell",
"bytes": "208"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from bcc import BPF
import argparse
from time import strftime
import ctypes as ct
# symbols
kallsyms = "/proc/kallsyms"
# arguments
examples = """examples:
./ext4slower # trace operations slower than 10 ms (default)
./ext4slower 1 # trace operations slower than 1 ms
./ext4slower -j 1 # ... 1 ms, parsable output (csv)
./ext4slower 0 # trace all operations (warning: verbose)
./ext4slower -p 185 # trace PID 185 only
"""
parser = argparse.ArgumentParser(
description="Trace common ext4 file operations slower than a threshold",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-j", "--csv", action="store_true",
help="just print fields: comma-separated values")
parser.add_argument("-p", "--pid",
help="trace this PID only")
parser.add_argument("min_ms", nargs="?", default='10',
help="minimum I/O duration to trace, in ms (default 10)")
args = parser.parse_args()
min_ms = int(args.min_ms)
pid = args.pid
csv = args.csv
debug = 0
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/dcache.h>
// XXX: switch these to char's when supported
#define TRACE_READ 0
#define TRACE_WRITE 1
#define TRACE_OPEN 2
#define TRACE_FSYNC 3
struct val_t {
u64 ts;
u64 offset;
struct file *fp;
};
struct data_t {
// XXX: switch some to u32's when supported
u64 ts_us;
u64 type;
u64 size;
u64 offset;
u64 delta_us;
u64 pid;
char task[TASK_COMM_LEN];
char file[DNAME_INLINE_LEN];
};
BPF_HASH(entryinfo, pid_t, struct val_t);
BPF_PERF_OUTPUT(events);
//
// Store timestamp and size on entry
//
// The current ext4 (Linux 4.5) uses generic_file_read_iter(), instead of it's
// own function, for reads. So we need to trace that and then filter on ext4,
// which I do by checking file->f_op.
int trace_read_entry(struct pt_regs *ctx, struct kiocb *iocb)
{
u32 pid;
pid = bpf_get_current_pid_tgid();
if (FILTER_PID)
return 0;
// ext4 filter on file->f_op == ext4_file_operations
struct file *fp = iocb->ki_filp;
if ((u64)fp->f_op != EXT4_FILE_OPERATIONS)
return 0;
// store filep and timestamp by pid
struct val_t val = {};
val.ts = bpf_ktime_get_ns();
val.fp = fp;
val.offset = iocb->ki_pos;
if (val.fp)
entryinfo.update(&pid, &val);
return 0;
}
// ext4_file_write_iter():
int trace_write_entry(struct pt_regs *ctx, struct kiocb *iocb)
{
u32 pid;
pid = bpf_get_current_pid_tgid();
if (FILTER_PID)
return 0;
// store filep and timestamp by pid
struct val_t val = {};
val.ts = bpf_ktime_get_ns();
val.fp = iocb->ki_filp;
val.offset = iocb->ki_pos;
if (val.fp)
entryinfo.update(&pid, &val);
return 0;
}
// ext4_file_open():
int trace_open_entry(struct pt_regs *ctx, struct inode *inode,
struct file *file)
{
u32 pid;
pid = bpf_get_current_pid_tgid();
if (FILTER_PID)
return 0;
// store filep and timestamp by pid
struct val_t val = {};
val.ts = bpf_ktime_get_ns();
val.fp = file;
val.offset = 0;
if (val.fp)
entryinfo.update(&pid, &val);
return 0;
}
// ext4_sync_file():
int trace_fsync_entry(struct pt_regs *ctx, struct file *file)
{
u32 pid;
pid = bpf_get_current_pid_tgid();
if (FILTER_PID)
return 0;
// store filep and timestamp by pid
struct val_t val = {};
val.ts = bpf_ktime_get_ns();
val.fp = file;
val.offset = 0;
if (val.fp)
entryinfo.update(&pid, &val);
return 0;
}
//
// Output
//
static int trace_return(struct pt_regs *ctx, int type)
{
struct val_t *valp;
u32 pid = bpf_get_current_pid_tgid();
valp = entryinfo.lookup(&pid);
if (valp == 0) {
// missed tracing issue or filtered
return 0;
}
// calculate delta
u64 ts = bpf_ktime_get_ns();
u64 delta_us = (ts - valp->ts) / 1000;
entryinfo.delete(&pid);
if (FILTER_US)
return 0;
// workaround (rewriter should handle file to d_iname in one step):
struct dentry *de = NULL;
bpf_probe_read(&de, sizeof(de), &valp->fp->f_path.dentry);
// populate output struct
u32 size = PT_REGS_RC(ctx);
struct data_t data = {.type = type, .size = size, .delta_us = delta_us,
.pid = pid};
data.ts_us = ts / 1000;
data.offset = valp->offset;
bpf_probe_read(&data.file, sizeof(data.file), de->d_iname);
bpf_get_current_comm(&data.task, sizeof(data.task));
events.perf_submit(ctx, &data, sizeof(data));
return 0;
}
int trace_read_return(struct pt_regs *ctx)
{
return trace_return(ctx, TRACE_READ);
}
int trace_write_return(struct pt_regs *ctx)
{
return trace_return(ctx, TRACE_WRITE);
}
int trace_open_return(struct pt_regs *ctx)
{
return trace_return(ctx, TRACE_OPEN);
}
int trace_fsync_return(struct pt_regs *ctx)
{
return trace_return(ctx, TRACE_FSYNC);
}
"""
# code replacements
# Find the kernel address of ext4_file_operations so the BPF program can
# compare file->f_op against it.  kallsyms lines are "address type symbol";
# note the second field is the one-letter symbol type, not a size.
with open(kallsyms) as syms:
    ops = ''
    for line in syms:
        (addr, size, name) = line.rstrip().split(" ", 2)
        if name == "ext4_file_operations":
            ops = "0x" + addr
            break
    if ops == '':
        print("ERROR: no ext4_file_operations in /proc/kallsyms. Exiting.")
        exit()
bpf_text = bpf_text.replace('EXT4_FILE_OPERATIONS', ops)
# Latency filter: 0 disables it; otherwise events at or below the
# threshold (converted to microseconds) are dropped in the kernel.
if min_ms == 0:
    bpf_text = bpf_text.replace('FILTER_US', '0')
else:
    bpf_text = bpf_text.replace('FILTER_US',
        'delta_us <= %s' % str(min_ms * 1000))
# PID filter: trace only the requested pid, or disable the check.
if args.pid:
    bpf_text = bpf_text.replace('FILTER_PID', 'pid != %s' % pid)
else:
    bpf_text = bpf_text.replace('FILTER_PID', '0')
if debug:
    print(bpf_text)
# kernel->user event data: struct data_t
DNAME_INLINE_LEN = 32  # linux/dcache.h
TASK_COMM_LEN = 16    # linux/sched.h
class Data(ct.Structure):
    """ctypes mirror of the BPF program's ``struct data_t``.

    Field order and widths must match the C struct exactly; perf event
    payloads are cast to this layout in print_event().
    """
    _fields_ = [
        ("ts_us", ct.c_ulonglong),
        ("type", ct.c_ulonglong),
        ("size", ct.c_ulonglong),
        ("offset", ct.c_ulonglong),
        ("delta_us", ct.c_ulonglong),
        ("pid", ct.c_ulonglong),
        ("task", ct.c_char * TASK_COMM_LEN),
        ("file", ct.c_char * DNAME_INLINE_LEN)
    ]
# process event
def print_event(cpu, data, size):
    """Perf-buffer callback: decode one struct data_t event and print it."""
    event = ct.cast(data, ct.POINTER(Data)).contents
    # Map the numeric trace type to its single-letter label; anything
    # unrecognised falls back to 'R' (read), as before.
    op = {1: 'W', 2: 'O', 3: 'S'}.get(event.type, 'R')
    if csv:
        print("%d,%s,%d,%s,%d,%d,%d,%s" % (
            event.ts_us, event.task, event.pid, op, event.size,
            event.offset, event.delta_us, event.file))
        return
    print("%-8s %-14.14s %-6s %1s %-7s %-8d %7.2f %s" % (
        strftime("%H:%M:%S"), event.task, event.pid, op, event.size,
        event.offset / 1024, float(event.delta_us) / 1000, event.file))
# initialize BPF
b = BPF(text=bpf_text)
# Common file functions. See earlier comment about generic_file_read_iter().
# Entry probes record the start timestamp; matching return probes compute
# the latency and emit the perf event.
b.attach_kprobe(event="generic_file_read_iter", fn_name="trace_read_entry")
b.attach_kprobe(event="ext4_file_write_iter", fn_name="trace_write_entry")
b.attach_kprobe(event="ext4_file_open", fn_name="trace_open_entry")
b.attach_kprobe(event="ext4_sync_file", fn_name="trace_fsync_entry")
b.attach_kretprobe(event="generic_file_read_iter", fn_name="trace_read_return")
b.attach_kretprobe(event="ext4_file_write_iter", fn_name="trace_write_return")
b.attach_kretprobe(event="ext4_file_open", fn_name="trace_open_return")
b.attach_kretprobe(event="ext4_sync_file", fn_name="trace_fsync_return")
# header
if (csv):
    print("ENDTIME_us,TASK,PID,TYPE,BYTES,OFFSET_b,LATENCY_us,FILE")
else:
    if min_ms == 0:
        print("Tracing ext4 operations")
    else:
        print("Tracing ext4 operations slower than %d ms" % min_ms)
    print("%-8s %-14s %-6s %1s %-7s %-8s %7s %s" % ("TIME", "COMM", "PID", "T",
        "BYTES", "OFF_KB", "LAT(ms)", "FILENAME"))
# read events
# Block forever, dispatching perf events to print_event().
b["events"].open_perf_buffer(print_event)
while 1:
    b.kprobe_poll()
| {
"content_hash": "7ff105a1d505fb15a505055e178b2cfe",
"timestamp": "",
"source": "github",
"line_count": 305,
"max_line_length": 79,
"avg_line_length": 26.63606557377049,
"alnum_prop": 0.6106597735105859,
"repo_name": "zaafar/bcc",
"id": "b0b72a544129e7a12625c59136e47ee1b0a8e34d",
"size": "9062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/ext4slower.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "83121"
},
{
"name": "C++",
"bytes": "659435"
},
{
"name": "CMake",
"bytes": "26003"
},
{
"name": "HTML",
"bytes": "2979"
},
{
"name": "LLVM",
"bytes": "4379"
},
{
"name": "Limbo",
"bytes": "6069"
},
{
"name": "Lua",
"bytes": "127048"
},
{
"name": "Objective-C",
"bytes": "16024"
},
{
"name": "Python",
"bytes": "281897"
},
{
"name": "Shell",
"bytes": "8732"
},
{
"name": "Yacc",
"bytes": "19817"
}
],
"symlink_target": ""
} |
"""
The MIT License (MIT)
Copyright (c) 2015-2019 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import datetime
from .enums import ActivityType, try_enum
from .colour import Colour
from .utils import _get_as_snowflake
__all__ = (
'Activity',
'Streaming',
'Game',
'Spotify',
)
"""If curious, this is the current schema for an activity.
It's fairly long so I will document it here:
All keys are optional.
state: str (max: 128),
details: str (max: 128)
timestamps: dict
start: int (min: 1)
end: int (min: 1)
assets: dict
large_image: str (max: 32)
large_text: str (max: 128)
small_image: str (max: 32)
small_text: str (max: 128)
party: dict
id: str (max: 128),
size: List[int] (max-length: 2)
elem: int (min: 1)
secrets: dict
match: str (max: 128)
join: str (max: 128)
spectate: str (max: 128)
instance: bool
application_id: str
name: str (max: 128)
url: str
type: int
sync_id: str
session_id: str
flags: int
There are also activity flags which are mostly uninteresting for the library atm.
t.ActivityFlags = {
INSTANCE: 1,
JOIN: 2,
SPECTATE: 4,
JOIN_REQUEST: 8,
SYNC: 16,
PLAY: 32
}
"""
class _ActivityTag:
    """Marker base class identifying activity-like objects; no behaviour."""
    __slots__ = ()
class Activity(_ActivityTag):
    """Represents an activity in Discord.

    This could be an activity such as streaming, playing, listening
    or watching.

    For memory optimisation purposes, some activities are offered in slimmed
    down versions:

    - :class:`Game`
    - :class:`Streaming`

    Attributes
    ------------
    application_id: :class:`int`
        The application ID of the game.
    name: :class:`str`
        The name of the activity.
    url: :class:`str`
        A stream URL that the activity could be doing.
    type: :class:`ActivityType`
        The type of activity currently being done.
    state: :class:`str`
        The user's current state. For example, "In Game".
    details: :class:`str`
        The detail of the user's current activity.
    timestamps: :class:`dict`
        A dictionary of timestamps. It contains the following optional keys:

        - ``start``: Corresponds to when the user started doing the
            activity in milliseconds since Unix epoch.
        - ``end``: Corresponds to when the user will finish doing the
            activity in milliseconds since Unix epoch.
    assets: :class:`dict`
        A dictionary representing the images and their hover text of an activity.
        It contains the following optional keys:

        - ``large_image``: A string representing the ID for the large image asset.
        - ``large_text``: A string representing the text when hovering over the large image asset.
        - ``small_image``: A string representing the ID for the small image asset.
        - ``small_text``: A string representing the text when hovering over the small image asset.
    party: :class:`dict`
        A dictionary representing the activity party. It contains the following optional keys:

        - ``id``: A string representing the party ID.
        - ``size``: A list of up to two integer elements denoting (current_size, maximum_size).
    """

    __slots__ = ('state', 'details', 'timestamps', 'assets', 'party',
                 'flags', 'sync_id', 'session_id', 'type', 'name', 'url', 'application_id')

    def __init__(self, **kwargs):
        self.state = kwargs.pop('state', None)
        self.details = kwargs.pop('details', None)
        self.timestamps = kwargs.pop('timestamps', {})
        self.assets = kwargs.pop('assets', {})
        self.party = kwargs.pop('party', {})
        self.application_id = _get_as_snowflake(kwargs, 'application_id')
        self.name = kwargs.pop('name', None)
        self.url = kwargs.pop('url', None)
        self.flags = kwargs.pop('flags', 0)
        self.sync_id = kwargs.pop('sync_id', None)
        self.session_id = kwargs.pop('session_id', None)
        # -1 is not a valid ActivityType value, so a missing/unknown 'type'
        # yields an unrecognised enum value rather than a silent default.
        self.type = try_enum(ActivityType, kwargs.pop('type', -1))

    def to_dict(self):
        """Serialise this activity back into a gateway-style payload dict."""
        ret = {}
        for attr in self.__slots__:
            value = getattr(self, attr, None)
            if value is None:
                continue
            # Skip empty dicts (timestamps/assets/party) to keep payloads small.
            if isinstance(value, dict) and len(value) == 0:
                continue
            ret[attr] = value
        # The enum itself is not serialisable; send its integer value instead.
        ret['type'] = self.type.value
        return ret

    @property
    def start(self):
        """Optional[:class:`datetime.datetime`]: When the user started doing this activity in UTC, if applicable."""
        try:
            # Timestamps are in milliseconds since the Unix epoch.
            return datetime.datetime.utcfromtimestamp(self.timestamps['start'] / 1000)
        except KeyError:
            return None

    @property
    def end(self):
        """Optional[:class:`datetime.datetime`]: When the user will stop doing this activity in UTC, if applicable."""
        try:
            return datetime.datetime.utcfromtimestamp(self.timestamps['end'] / 1000)
        except KeyError:
            return None

    def _image_url(self, key):
        # Shared builder for large/small CDN asset URLs.  App-asset images
        # are scoped to the owning application, so both the application ID
        # and the asset ID must be present to form a URL.
        if self.application_id is None:
            return None
        try:
            image_id = self.assets[key]
        except KeyError:
            return None
        return 'https://cdn.discordapp.com/app-assets/{0}/{1}.png'.format(self.application_id, image_id)

    @property
    def large_image_url(self):
        """Optional[:class:`str`]: Returns a URL pointing to the large image asset of this activity if applicable."""
        return self._image_url('large_image')

    @property
    def small_image_url(self):
        """Optional[:class:`str`]: Returns a URL pointing to the small image asset of this activity if applicable."""
        return self._image_url('small_image')

    @property
    def large_image_text(self):
        """Optional[:class:`str`]: Returns the large image asset hover text of this activity if applicable."""
        return self.assets.get('large_text', None)

    @property
    def small_image_text(self):
        """Optional[:class:`str`]: Returns the small image asset hover text of this activity if applicable."""
        return self.assets.get('small_text', None)
class Game(_ActivityTag):
    """A slimmed down version of :class:`Activity` that represents a Discord game.

    This is typically displayed via **Playing** on the official Discord client.

    .. container:: operations

        .. describe:: x == y

            Checks if two games are equal.

        .. describe:: x != y

            Checks if two games are not equal.

        .. describe:: hash(x)

            Returns the game's hash.

        .. describe:: str(x)

            Returns the game's name.

    Parameters
    -----------
    name: :class:`str`
        The game's name.
    start: Optional[:class:`datetime.datetime`]
        A naive UTC timestamp representing when the game started. Keyword-only parameter. Ignored for bots.
    end: Optional[:class:`datetime.datetime`]
        A naive UTC timestamp representing when the game ends. Keyword-only parameter. Ignored for bots.

    Attributes
    -----------
    name: :class:`str`
        The game's name.
    """

    __slots__ = ('name', '_end', '_start')

    def __init__(self, name, **extra):
        self.name = name
        if 'timestamps' in extra:
            # Gateway payloads carry a ready-made timestamps dict holding
            # milliseconds since the Unix epoch.
            stamps = extra['timestamps']
            self._start = stamps.get('start', 0)
            self._end = stamps.get('end', 0)
        else:
            # User-supplied keyword arguments are datetime objects.
            self._extract_timestamp(extra, 'start')
            self._extract_timestamp(extra, 'end')

    def _extract_timestamp(self, data, key):
        # Store the epoch-milliseconds equivalent of ``data[key]`` on
        # ``self._<key>``; 0 means "not provided".
        if key in data:
            setattr(self, '_' + key, data[key].timestamp() * 1000.0)
        else:
            setattr(self, '_' + key, 0)

    @property
    def type(self):
        """Returns the game's type. This is for compatibility with :class:`Activity`.

        It always returns :attr:`ActivityType.playing`.
        """
        return ActivityType.playing

    @property
    def start(self):
        """Optional[:class:`datetime.datetime`]: When the user started playing this game in UTC, if applicable."""
        return datetime.datetime.utcfromtimestamp(self._start / 1000) if self._start else None

    @property
    def end(self):
        """Optional[:class:`datetime.datetime`]: When the user will stop playing this game in UTC, if applicable."""
        return datetime.datetime.utcfromtimestamp(self._end / 1000) if self._end else None

    def __str__(self):
        return str(self.name)

    def __repr__(self):
        return '<Game name={0.name!r}>'.format(self)

    def to_dict(self):
        stamps = {}
        if self._start:
            stamps['start'] = self._start
        if self._end:
            stamps['end'] = self._end
        return {
            'type': ActivityType.playing.value,
            'name': str(self.name),
            'timestamps': stamps
        }

    def __eq__(self, other):
        return isinstance(other, Game) and other.name == self.name

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self.name)
class Streaming(_ActivityTag):
    """A slimmed down version of :class:`Activity` that represents a Discord streaming status.

    This is typically displayed via **Streaming** on the official Discord client.

    .. container:: operations

        .. describe:: x == y

            Checks if two streams are equal.

        .. describe:: x != y

            Checks if two streams are not equal.

        .. describe:: hash(x)

            Returns the stream's hash.

        .. describe:: str(x)

            Returns the stream's name.

    Attributes
    -----------
    name: :class:`str`
        The stream's name.
    url: :class:`str`
        The stream's URL. Currently only twitch.tv URLs are supported. Anything else is silently
        discarded.
    details: Optional[:class:`str`]
        If provided, typically the game the streamer is playing.
    assets: :class:`dict`
        A dictionary comprising of similar keys than those in :attr:`Activity.assets`.
    """

    __slots__ = ('name', 'url', 'details', 'assets')

    def __init__(self, *, name, url, **extra):
        self.name = name
        self.url = url
        self.details = extra.get('details')
        self.assets = extra.get('assets', {})

    @property
    def type(self):
        """Returns the game's type. This is for compatibility with :class:`Activity`.

        It always returns :attr:`ActivityType.streaming`.
        """
        return ActivityType.streaming

    def __str__(self):
        return str(self.name)

    def __repr__(self):
        return '<Streaming name={0.name!r}>'.format(self)

    @property
    def twitch_name(self):
        """Optional[:class:`str`]: If provided, the twitch name of the user streaming.

        This corresponds to the ``large_image`` key of the :attr:`Streaming.assets`
        dictionary if it starts with ``twitch:``. Typically set by the Discord client.
        """
        if 'large_image' not in self.assets:
            return None
        asset = self.assets['large_image']
        return asset[7:] if asset[:7] == 'twitch:' else None

    def to_dict(self):
        payload = {
            'type': ActivityType.streaming.value,
            'name': str(self.name),
            'url': str(self.url),
            'assets': self.assets
        }
        if self.details:
            payload['details'] = self.details
        return payload

    def __eq__(self, other):
        if not isinstance(other, Streaming):
            return False
        return other.name == self.name and other.url == self.url

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self.name)
class Spotify:
    """Represents a Spotify listening activity from Discord. This is a special case of
    :class:`Activity` that makes it easier to work with the Spotify integration.

    .. container:: operations

        .. describe:: x == y

            Checks if two activities are equal.

        .. describe:: x != y

            Checks if two activities are not equal.

        .. describe:: hash(x)

            Returns the activity's hash.

        .. describe:: str(x)

            Returns the string 'Spotify'.
    """

    __slots__ = ('_state', '_details', '_timestamps', '_assets', '_party',
                 '_sync_id', '_session_id')

    def __init__(self, **data):
        self._state = data.get('state')
        self._details = data.get('details')
        self._timestamps = data.get('timestamps', {})
        self._assets = data.get('assets', {})
        self._party = data.get('party', {})
        # Both of these are mandatory in a Spotify presence payload; a
        # missing key raises KeyError, matching the raw dict access.
        self._sync_id = data['sync_id']
        self._session_id = data['session_id']

    @property
    def type(self):
        """Returns the activity's type. This is for compatibility with :class:`Activity`.

        It always returns :attr:`ActivityType.listening`.
        """
        return ActivityType.listening

    @property
    def colour(self):
        """Returns the Spotify integration colour, as a :class:`Colour`.

        There is an alias for this named :meth:`color`"""
        return Colour(0x1db954)

    @property
    def color(self):
        """Returns the Spotify integration colour, as a :class:`Colour`.

        There is an alias for this named :meth:`colour`"""
        return self.colour

    def to_dict(self):
        return {
            'flags': 48, # SYNC | PLAY
            'name': 'Spotify',
            'assets': self._assets,
            'party': self._party,
            'sync_id': self._sync_id,
            'session_id': self._session_id,
            'timestamps': self._timestamps,
            'details': self._details,
            'state': self._state
        }

    @property
    def name(self):
        """:class:`str`: The activity's name. This will always return "Spotify"."""
        return 'Spotify'

    def __eq__(self, other):
        if not isinstance(other, Spotify):
            return False
        return (other._session_id == self._session_id
                and other._sync_id == self._sync_id
                and other.start == self.start)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self._session_id)

    def __str__(self):
        return 'Spotify'

    def __repr__(self):
        return '<Spotify title={0.title!r} artist={0.artist!r} track_id={0.track_id!r}>'.format(self)

    @property
    def title(self):
        """:class:`str`: The title of the song being played."""
        return self._details

    @property
    def artists(self):
        """List[:class:`str`]: The artists of the song being played."""
        return self._state.split('; ')

    @property
    def artist(self):
        """:class:`str`: The artist of the song being played.

        This does not attempt to split the artist information into
        multiple artists. Useful if there's only a single artist.
        """
        return self._state

    @property
    def album(self):
        """:class:`str`: The album that the song being played belongs to."""
        return self._assets.get('large_text', '')

    @property
    def album_cover_url(self):
        """:class:`str`: The album cover image URL from Spotify's CDN."""
        image_key = self._assets.get('large_image', '')
        if not image_key.startswith('spotify:'):
            return ''
        # Strip the 'spotify:' scheme to obtain the raw CDN image ID.
        return 'https://i.scdn.co/image/' + image_key[8:]

    @property
    def track_id(self):
        """:class:`str`: The track ID used by Spotify to identify this song."""
        return self._sync_id

    @property
    def start(self):
        """:class:`datetime.datetime`: When the user started playing this song in UTC."""
        return datetime.datetime.utcfromtimestamp(self._timestamps['start'] / 1000)

    @property
    def end(self):
        """:class:`datetime.datetime`: When the user will stop playing this song in UTC."""
        return datetime.datetime.utcfromtimestamp(self._timestamps['end'] / 1000)

    @property
    def duration(self):
        """:class:`datetime.timedelta`: The duration of the song being played."""
        return self.end - self.start

    @property
    def party_id(self):
        """:class:`str`: The party ID of the listening party."""
        return self._party.get('id', '')
def create_activity(data):
    """Deserialize a raw activity payload into the best-matching activity class.

    Returns ``None`` for an empty payload. Rich presences (payloads carrying
    an ``application_id`` or ``session_id``) map to :class:`Activity`;
    otherwise the lighter :class:`Game`, :class:`Streaming` or
    :class:`Spotify` wrappers are used when the payload qualifies.
    """
    if not data:
        return None

    kind = try_enum(ActivityType, data.get('type', -1))

    if kind is ActivityType.playing:
        # Plain games lack application/session info; rich presences carry it.
        if 'application_id' not in data and 'session_id' not in data:
            return Game(**data)
        return Activity(**data)

    if kind is ActivityType.streaming:
        return Streaming(**data) if 'url' in data else Activity(**data)

    if kind is ActivityType.listening and 'sync_id' in data and 'session_id' in data:
        return Spotify(**data)

    return Activity(**data)
| {
"content_hash": "be1c50060bdec6d012296ef6bc7fe938",
"timestamp": "",
"source": "github",
"line_count": 586,
"max_line_length": 118,
"avg_line_length": 31.013651877133107,
"alnum_prop": 0.600583250797843,
"repo_name": "gnmiller/craig-bot",
"id": "fd9402095c044727b4fd7b717bb4a1ab45502c95",
"size": "18199",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "craig-bot/lib/python3.6/site-packages/discord/activity.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "259967"
},
{
"name": "C++",
"bytes": "670"
},
{
"name": "Python",
"bytes": "5770206"
}
],
"symlink_target": ""
} |
from datetime import datetime
import pytz
import apache_beam as beam
from apache_beam.transforms import window
from log_elements import LogElements
p = beam.Pipeline()

# One event timestamp per (March 2020, UTC) day of month; duplicates model
# multiple events landing in the same fixed daily window.
event_days = (1, 1, 1, 1, 5, 5, 8, 8, 8, 10)
events = [
    window.TimestampedValue("event",
                            datetime(2020, 3, day, tzinfo=pytz.UTC).timestamp())
    for day in event_days
]

# Count events per element inside fixed one-day (24h) windows and log each
# window's result.
(p | beam.Create(events)
   | beam.WindowInto(window.FixedWindows(24 * 60 * 60))
   | beam.combiners.Count.PerElement()
   | LogElements(with_window=True))

p.run()
| {
"content_hash": "59f7837509a82a68dadeba43216fa8a3",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 105,
"avg_line_length": 49.535714285714285,
"alnum_prop": 0.6604181687094448,
"repo_name": "iemejia/incubator-beam",
"id": "0444becb41fe8756af09a6861a0691be6ad33fb8",
"size": "2198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "learning/katas/python/Windowing/Fixed Time Window/Fixed Time Window/task.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "22216"
},
{
"name": "Java",
"bytes": "9687045"
},
{
"name": "Protocol Buffer",
"bytes": "1407"
},
{
"name": "Shell",
"bytes": "10104"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.