"""Implements additional custom Pylint checkers to be used as part of
presubmit checks. Next message id would be C0041.
"""
from __future__ import annotations
import linecache
import os
import re
import sys
import tokenize
from core import handler_schema_constants
from .. import docstrings_checker
_PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
_PYLINT_PATH = os.path.join(_PARENT_DIR, 'oppia_tools', 'pylint-1.9.4')
sys.path.insert(0, _PYLINT_PATH)
# List of punctuation symbols that can be used at the end of
# comments and docstrings.
ALLOWED_TERMINATING_PUNCTUATIONS = ['.', '?', '}', ']', ')']
# If any of these phrases are found inside a docstring or comment,
# the punctuation and capital letter checks will be skipped for that
# comment or docstring.
EXCLUDED_PHRASES = [
'coding:', 'pylint:', 'http://', 'https://', 'scripts/', 'extract_node'
]
ALLOWED_PRAGMAS_FOR_INLINE_COMMENTS = [
'pylint:', 'isort:', 'type: ignore', 'pragma:', 'https:']
import astroid # isort:skip pylint: disable=wrong-import-order, wrong-import-position
from pylint import checkers # isort:skip pylint: disable=wrong-import-order, wrong-import-position
from pylint import interfaces # isort:skip pylint: disable=wrong-import-order, wrong-import-position
from pylint.checkers import typecheck # isort:skip pylint: disable=wrong-import-order, wrong-import-position
from pylint.checkers import utils as checker_utils # isort:skip pylint: disable=wrong-import-order, wrong-import-position
from pylint.extensions import _check_docs_utils # isort:skip pylint: disable=wrong-import-order, wrong-import-position
def read_from_node(node):
"""Returns the data read from the ast node in unicode form.
Args:
node: astroid.scoped_nodes.Function. Node to access module content.
Returns:
list(str). The data read from the ast node.
"""
# Readlines returns bytes, thus we need to decode them to string.
return [line.decode('utf-8') for line in node.stream().readlines()]
class ExplicitKeywordArgsChecker(checkers.BaseChecker):
"""Custom pylint checker which checks for explicit keyword arguments
in any function call.
"""
__implements__ = interfaces.IAstroidChecker
name = 'explicit-keyword-args'
priority = -1
msgs = {
'C0001': (
'Keyword argument %s should be named explicitly in %s call of %s.',
'non-explicit-keyword-args',
'All keyword arguments should be explicitly named in function call.'
),
'C0027': (
            'Keyword argument %s used for a non-keyword argument in %s '
            'call of %s.',
            'arg-name-for-non-keyword-arg',
            'Positional arguments should not be used as keyword arguments '
            'in a function call.'
),
}
def _check_non_explicit_keyword_args(
self, node, name, callable_name, keyword_args,
num_positional_args_unused, num_mandatory_parameters):
"""Custom pylint check to ensure that position arguments should not
be used as keyword arguments.
Args:
node: astroid.node.Function. The current function call node.
name: str. Name of the keyword argument.
callable_name: str. Name of method type.
keyword_args: list(str). Name of all keyword arguments in function
call.
num_positional_args_unused: int. Number of unused positional
arguments.
num_mandatory_parameters: int. Number of mandatory parameters.
Returns:
int. Number of unused positional arguments.
"""
display_name = repr(name)
if name not in keyword_args and (
num_positional_args_unused > (
num_mandatory_parameters)) and (
callable_name != 'constructor'):
            # This try/except block tries to get the function name. The
            # attribute that stores the name differs across node types, so
            # both possibilities are handled.
try:
func_name = node.func.attrname
except AttributeError:
func_name = node.func.name
self.add_message(
'non-explicit-keyword-args', node=node,
args=(
display_name,
callable_name,
func_name))
num_positional_args_unused -= 1
return num_positional_args_unused
def _check_argname_for_nonkeyword_arg(
self, node, called, callable_name, keyword_args,
keyword_args_in_funcdef):
"""Custom pylint check to ensure that position arguments should not
be used as keyword arguments.
Args:
node: astroid.node.Function. The current function call node.
called: astroid.Call. The function call object.
keyword_args: list(str). Name of all keyword arguments in function
call.
callable_name: str. Name of method type.
keyword_args_in_funcdef: list(str). Name of all keyword arguments in
function definition.
"""
for arg in keyword_args:
            # TODO(#10038): Fix the check to cover the case below as well.
            # If there are *args and **kwargs in the function definition, skip
            # the check because keyword arguments can be used in the function
            # call even if **kwargs is present in the definition. See example:
# Function def -> def func(entity_id, *args, **kwargs):
# Function call -> func(entity_id='1', a=1, b=2, c=3)
            # By parsing the calling method we get
# keyword_arguments = entity_id, a, b, c.
# From the function definition, we will get keyword_arguments = []
# Now we do not have a way to identify which one is a keyword
# argument and which one is not.
if not called.args.kwarg and callable_name != 'constructor':
if not arg in keyword_args_in_funcdef:
# This try/except block tries to get the function
# name.
try:
func_name = node.func.attrname
except AttributeError:
func_name = node.func.name
self.add_message(
'arg-name-for-non-keyword-arg', node=node,
args=(repr(arg), callable_name, func_name))
def visit_call(self, node):
"""Visits each function call in a lint check.
Args:
node: Call. The current function call node.
"""
called = checker_utils.safe_infer(node.func)
try:
# For the rationale behind the Pylint pragma below,
# see https://stackoverflow.com/a/35701863/8115428
called, implicit_args, callable_name = (
typecheck._determine_callable(called)) # pylint: disable=protected-access
except ValueError:
return
if called.args.args is None:
# Built-in functions have no argument information.
return
if len(called.argnames()) != len(set(called.argnames())):
return
# Build the set of keyword arguments and count the positional arguments.
call_site = astroid.arguments.CallSite.from_call(node)
num_positional_args = len(call_site.positional_arguments)
keyword_args = list(call_site.keyword_arguments.keys())
already_filled_positionals = getattr(called, 'filled_positionals', 0)
already_filled_keywords = getattr(called, 'filled_keywords', {})
keyword_args += list(already_filled_keywords)
num_positional_args += already_filled_positionals
num_positional_args += implicit_args
# Analyze the list of formal parameters.
num_mandatory_parameters = len(called.args.args) - len(
called.args.defaults)
parameters = []
parameter_name_to_index = {}
for i, arg in enumerate(called.args.args):
assert isinstance(arg, astroid.AssignName)
name = arg.name
parameter_name_to_index[name] = i
if i >= num_mandatory_parameters:
defval = called.args.defaults[i - num_mandatory_parameters]
else:
defval = None
parameters.append([(name, defval), False])
num_positional_args_unused = num_positional_args
# The list below will store all the keyword arguments present in the
# function definition.
keyword_args_in_funcdef = []
# Check that all parameters with a default value have
# been called explicitly.
for [(name, defval), _] in parameters:
if defval:
keyword_args_in_funcdef.append(name)
num_positional_args_unused = (
self._check_non_explicit_keyword_args(
node, name, callable_name, keyword_args,
num_positional_args_unused, num_mandatory_parameters))
self._check_argname_for_nonkeyword_arg(
node, called, callable_name, keyword_args, keyword_args_in_funcdef)
class HangingIndentChecker(checkers.BaseChecker):
"""Custom pylint checker which checks for break after parenthesis in case
of hanging indentation.
"""
__implements__ = interfaces.ITokenChecker
name = 'hanging-indent'
priority = -1
msgs = {
'C0002': (
(
'There should be a break after parenthesis when content within '
'parenthesis spans multiple lines.'),
'no-break-after-hanging-indent',
(
'If something within parenthesis extends along multiple lines, '
'break after opening parenthesis.')
),
}
def process_tokens(self, tokens):
"""Process tokens to check if there is a line break after the bracket.
Args:
tokens: astroid.Tokens. Object to process tokens.
"""
escape_character_indicator = '\\'
string_indicator = '\''
excluded = False
for (token_type, token, (line_num, _), _, line) in tokens:
# Check if token type is an operator and is either a
# left parenthesis '(' or a right parenthesis ')'.
if token_type == tokenize.OP and token in ('(', ')'):
line = line.strip()
# Exclude 'if', 'elif', 'while' statements.
if line.startswith(('if ', 'while ', 'elif ')):
excluded = True
# Skip check if there is a comment at the end of line.
if excluded:
split_line = line.split()
if '#' in split_line:
comment_index = split_line.index('#')
if split_line[comment_index - 1].endswith('):'):
excluded = False
elif line.endswith('):'):
excluded = False
if excluded:
continue
bracket_count = 0
line_length = len(line)
escape_character_found = False
in_string = False
for char_num in range(line_length):
char = line[char_num]
if in_string and (
char == escape_character_indicator or
escape_character_found):
escape_character_found = not escape_character_found
continue
# Check if we found the string indicator and flip the
# in_string boolean.
if char == string_indicator:
in_string = not in_string
# Ignore anything inside a string.
if in_string:
continue
if char == '(':
if bracket_count == 0:
position = char_num
bracket_count += 1
elif char == ')' and bracket_count > 0:
bracket_count -= 1
if bracket_count > 0 and position + 1 < line_length:
# Allow the use of '[', ']', '{', '}' after the parenthesis.
separators = set('[{( ')
if line[line_length - 1] in separators:
continue
content = line[position + 1:]
# Skip check if there is nothing after the bracket.
split_content = content.split()
# Skip check if there is a comment at the end of line.
if '#' in split_content:
comment_index = split_content.index('#')
if comment_index == 0:
continue
else:
last_content_before_comment = (
split_content[comment_index - 1])
if last_content_before_comment.endswith(
('(', '[', '{')
):
continue
self.add_message(
'no-break-after-hanging-indent', line=line_num)
# The following class was derived from
# https://github.com/PyCQA/pylint/blob/377cc42f9e3116ff97cddd4567d53e9a3e24ebf9/pylint/extensions/docparams.py#L26
class DocstringParameterChecker(checkers.BaseChecker):
"""Checker for Sphinx, Google, or Numpy style docstrings
* Check that all function, method and constructor parameters are mentioned
in the params and types part of the docstring. Constructor parameters
can be documented in either the class docstring or ``__init__`` docstring,
but not both.
* Check that there are no naming inconsistencies between the signature and
the documentation, i.e. also report documented parameters that are missing
in the signature. This is important to find cases where parameters are
renamed only in the code, not in the documentation.
* Check that all explicitly raised exceptions in a function are documented
in the function docstring. Caught exceptions are ignored.
Args:
linter: Pylinter. The linter object.
"""
__implements__ = interfaces.IAstroidChecker
name = 'parameter_documentation'
msgs = {
'W9005': (
'"%s" has constructor parameters '
'documented in class and __init__',
'multiple-constructor-doc',
'Please remove parameter declarations '
'in the class or constructor.'),
'W9006': (
'"%s" not documented as being raised',
'missing-raises-doc',
'Please document exceptions for '
'all raised exception types.'),
'W9008': (
'Redundant returns documentation',
'redundant-returns-doc',
'Please remove the return/rtype '
'documentation from this method.'),
'W9010': (
'Redundant yields documentation',
'redundant-yields-doc',
'Please remove the yields documentation from this method.'),
'W9011': (
'Missing return documentation',
'missing-return-doc',
'Please add documentation about what this method returns.',
{'old_names': [('W9007', 'missing-returns-doc')]}),
'W9012': (
'Missing return type documentation',
'missing-return-type-doc',
'Please document the type returned by this method.',
# We can't use the same old_name for two different warnings
# {'old_names': [('W9007', 'missing-returns-doc')]}.
),
'W9013': (
'Missing yield documentation',
'missing-yield-doc',
'Please add documentation about what this generator yields.',
{'old_names': [('W9009', 'missing-yields-doc')]}),
'W9014': (
'Missing yield type documentation',
'missing-yield-type-doc',
'Please document the type yielded by this method.',
),
'W9015': (
'"%s" missing in parameter documentation',
'missing-param-doc',
'Please add parameter declarations for all parameters.',
{'old_names': [('W9003', 'old-missing-param-doc')]}),
'W9016': (
'"%s" missing in parameter type documentation',
'missing-type-doc',
'Please add parameter type declarations for all parameters.'
),
'W9017': (
'"%s" differing in parameter documentation',
'differing-param-doc',
'Please check parameter names in declarations.',
),
'W9018': (
'"%s" differing in parameter type documentation',
'differing-type-doc',
'Please check parameter names in type declarations.',
),
'W9019': (
'Line starting with "%s" requires 4 space indentation relative to'
' args line indentation',
'4-space-indentation-for-arg-parameters-doc',
'Please use 4 space indentation in parameter definitions relative'
' to the args line indentation.'
),
'W9020': (
'Line starting with "%s" requires 8 space indentation relative to'
' args line indentation',
'8-space-indentation-for-arg-in-descriptions-doc',
'Please indent wrap-around descriptions by 8 relative to the args'
' line indentation.'
),
'W9021': (
'Args: indentation is incorrect, must be at the outermost'
' indentation level.',
'incorrect-indentation-for-arg-header-doc',
'Please indent args line to the outermost indentation level.'
),
'W9022': (
'4 space indentation in docstring.',
'4-space-indentation-in-docstring',
'Please use 4 space indentation for parameters relative to section'
' headers.'
),
'W9023': (
'8 space indentation in docstring.',
'8-space-indentation-in-docstring',
'Please use 8 space indentation in wrap around messages'
' relative to section headers.'
),
'W9024': (
            'Raises section should be in the following form: '
            'Exception_name. Description.',
'malformed-raises-section',
'The parameter is incorrectly formatted.'
),
'W9025': (
'Period is not used at the end of the docstring.',
'no-period-used',
            'Please use a period at the end of the docstring.'
),
'W9026': (
'Multiline docstring should end with a new line.',
'no-newline-used-at-end',
'Please end multiline docstring with a new line.'
),
'W9027': (
'Single line docstring should not span two lines.',
'single-line-docstring-span-two-lines',
'Please do not use two lines for a single line docstring. '
'If line length exceeds 80 characters, '
'convert the single line docstring to a multiline docstring.'
),
'W9028': (
'Empty line before the end of multi-line docstring.',
'empty-line-before-end',
'Please do not use empty line before '
'the end of the multi-line docstring.'
),
'W9029': (
'Space after """ in docstring.',
'space-after-triple-quote',
'Please do not use space after """ in docstring.'
),
'W9030': (
'Missing single newline below class docstring.',
'newline-below-class-docstring',
'Please add a single newline below class docstring.'
),
'W9031': (
'Files must have a single newline above args in doc string.',
'single-space-above-args',
'Please enter a single newline above args in doc string.'
),
'W9032': (
'Files must have a single newline above returns in doc string.',
'single-space-above-returns',
'Please enter a single newline above returns in doc string.'
),
'W9033': (
'Files must have a single newline above raises in doc string.',
'single-space-above-raises',
'Please enter a single newline above raises in doc string.'
),
'W9034': (
'Files must have a single newline above yield in doc string.',
'single-space-above-yield',
'Please enter a single newline above yield in doc string.'
),
'W9035': (
            'Arguments should be in the following form: variable_name: '
            'typeinfo. Description.',
'malformed-args-section',
'The parameter is incorrectly formatted.'
),
'W9036': (
'Returns should be in the following form: typeinfo. Description.',
'malformed-returns-section',
'The parameter is incorrectly formatted.'
),
'W9037': (
'Yields should be in the following form: typeinfo. Description.',
'malformed-yields-section',
'The parameter is incorrectly formatted.'
),
'W9038': (
'Arguments starting with *args should be formatted in the following'
' form: *args: list(*). Description.',
'malformed-args-argument',
'The parameter is incorrectly formatted.'
)
}
options = (
(
'accept-no-param-doc',
{'default': True, 'type': 'yn', 'metavar': '<y or n>',
'help': 'Whether to accept totally missing parameter '
'documentation in the docstring of a '
'function that has parameters.'
}),
(
'accept-no-raise-doc',
{'default': True, 'type': 'yn', 'metavar': '<y or n>',
'help': 'Whether to accept totally missing raises '
'documentation in the docstring of a function that '
'raises an exception.'
}),
(
'accept-no-return-doc',
{'default': True, 'type': 'yn', 'metavar': '<y or n>',
'help': 'Whether to accept totally missing return '
'documentation in the docstring of a function that '
'returns a statement.'
}),
(
'accept-no-yields-doc',
{'default': True, 'type': 'yn', 'metavar': '<y or n>',
'help': 'Whether to accept totally missing yields '
'documentation in the docstring of a generator.'
}),
)
priority = -2
constructor_names = {'__init__', '__new__'}
not_needed_param_in_docstring = {'self', 'cls'}
docstring_sections = {'Raises:', 'Returns:', 'Yields:'}
    # Docstring section identifiers for the returns, yields and raises
    # sections, used to record which section of the docstring is currently
    # being parsed.
DOCSTRING_SECTION_RETURNS = 'returns'
DOCSTRING_SECTION_YIELDS = 'yields'
DOCSTRING_SECTION_RAISES = 'raises'
def visit_classdef(self, node):
"""Visit each class definition in a module and check if there is a
single new line below each class docstring.
Args:
node: astroid.nodes.ClassDef. Node for a class definition
in the AST.
"""
# Check if the given node has docstring.
if node.doc is None:
return
line_number = node.fromlineno
# Iterate till the start of docstring.
while True:
line = linecache.getline(node.root().file, line_number).strip()
if line.startswith(('"""', '\'\'\'', '\'', '"')):
break
line_number += 1
doc_length = len(node.doc.split('\n'))
line_number += doc_length
first_line_after_doc = linecache.getline(
node.root().file, line_number).strip()
second_line_after_doc = linecache.getline(
node.root().file, line_number + 1).strip()
if first_line_after_doc != '':
self.add_message('newline-below-class-docstring', node=node)
elif second_line_after_doc == '':
self.add_message('newline-below-class-docstring', node=node)
def visit_functiondef(self, node):
"""Called for function and method definitions (def).
Args:
node: astroid.scoped_nodes.Function. Node for a function or
method definition in the AST.
"""
node_doc = docstrings_checker.docstringify(node.doc)
self.check_functiondef_params(node, node_doc)
self.check_functiondef_returns(node, node_doc)
self.check_functiondef_yields(node, node_doc)
self.check_docstring_style(node)
self.check_docstring_section_indentation(node)
self.check_typeinfo(node, node_doc)
def check_typeinfo(self, node, node_doc):
"""Checks whether all parameters in a function definition are
properly formatted.
Args:
node: astroid.node.Function. Node for a function or
method definition in the AST.
node_doc: Docstring. Pylint Docstring class instance representing
a node's docstring.
"""
# The regexes are taken from the pylint codebase and are modified
# according to our needs. Link: https://github.com/PyCQA/pylint/blob/
# e89c361668aeead9fd192d5289c186611ef779ca/pylint/extensions/
# _check_docs_utils.py#L428.
re_param_line = re.compile(
r"""
\s* \*{{0,2}}(\w+) # identifier potentially with asterisks
\s* ( [:]
\s*
({type}|\S*|[\s\S]*)
(?:,\s+optional)?
[.]+\s )+ \s*
\s* [A-Z0-9](.*)[.\]}}\)]+$ # beginning of optional description
""".format(
type=_check_docs_utils.GoogleDocstring.re_multiple_type,
), flags=re.X | re.S | re.M)
re_returns_line = re.compile(
r"""
\s* (({type}|\S*|[\s\S]*).[.]+\s)+ # identifier
\s* [A-Z0-9](.*)[.\]}}\)]+$ # beginning of description
""".format(
type=_check_docs_utils.GoogleDocstring.re_multiple_type,
), flags=re.X | re.S | re.M)
re_yields_line = re_returns_line
re_raise_line = re.compile(
r"""
\s* ({type}[.])+ # identifier
\s* [A-Z0-9](.*)[.\]}}\)]+$ # beginning of description
""".format(
type=_check_docs_utils.GoogleDocstring.re_multiple_type,
), flags=re.X | re.S | re.M)
        # To extract the information from the Args section we use
        # _parse_section, which returns all the entries of that section.
        # Since it is a private method, a pylint pragma is needed to
        # suppress the resulting warning.
if node_doc.has_params():
entries = node_doc._parse_section( # pylint: disable=protected-access
_check_docs_utils.GoogleDocstring.re_param_section)
for entry in entries:
if entry.lstrip().startswith('*args') and not (
entry.lstrip().startswith('*args: list(*)')):
self.add_message('malformed-args-argument', node=node)
match = re_param_line.match(entry)
if not match:
self.add_message('malformed-args-section', node=node)
        # To extract the information from the Returns section we use
        # _parse_section, which returns all the entries of that section.
        # Since it is a private method, a pylint pragma is needed to
        # suppress the resulting warning.
if node_doc.has_returns():
entries = node_doc._parse_section( # pylint: disable=protected-access
_check_docs_utils.GoogleDocstring.re_returns_section)
entries = [''.join(entries)]
for entry in entries:
match = re_returns_line.match(entry)
if not match:
self.add_message('malformed-returns-section', node=node)
        # To extract the information from the Yields section we use
        # _parse_section, which returns all the entries of that section.
        # Since it is a private method, a pylint pragma is needed to
        # suppress the resulting warning.
if node_doc.has_yields():
entries = node_doc._parse_section( # pylint: disable=protected-access
_check_docs_utils.GoogleDocstring.re_yields_section)
entries = [''.join(entries)]
for entry in entries:
match = re_yields_line.match(entry)
if not match:
self.add_message('malformed-yields-section', node=node)
        # To extract the information from the Raises section we use
        # _parse_section, which returns all the exceptions of that section.
        # Since it is a private method, a pylint pragma is needed to
        # suppress the resulting warning.
if node_doc.exceptions():
entries = node_doc._parse_section( # pylint: disable=protected-access
_check_docs_utils.GoogleDocstring.re_raise_section)
for entry in entries:
match = re_raise_line.match(entry)
if not match:
self.add_message('malformed-raises-section', node=node)
def check_functiondef_params(self, node, node_doc):
"""Checks whether all parameters in a function definition are
documented.
Args:
node: astroid.scoped_nodes.Function. Node for a function or
method definition in the AST.
node_doc: Docstring. Pylint Docstring class instance representing
a node's docstring.
"""
node_allow_no_param = None
if node.name in self.constructor_names:
class_node = checker_utils.node_frame_class(node)
if class_node is not None:
class_doc = docstrings_checker.docstringify(class_node.doc)
self.check_single_constructor_params(
class_doc, node_doc, class_node)
# __init__ or class docstrings can have no parameters documented
# as long as the other documents them.
node_allow_no_param = (
class_doc.has_params() or
class_doc.params_documented_elsewhere() or
None
)
class_allow_no_param = (
node_doc.has_params() or
node_doc.params_documented_elsewhere() or
None
)
self.check_arguments_in_docstring(
class_doc, node.args, class_node,
accept_no_param_doc=class_allow_no_param)
self.check_arguments_in_docstring(
node_doc, node.args, node,
accept_no_param_doc=node_allow_no_param)
def check_docstring_style(self, node):
"""It fetches a function node and extract the class node from function
node if it is inside a class body and passes it to
check_docstring_structure which checks whether the docstring has a
space at the beginning and a period at the end.
Args:
node: astroid.scoped_nodes.Function. Node for a function or
method definition in the AST.
"""
if node.name in self.constructor_names:
class_node = checker_utils.node_frame_class(node)
if class_node is not None:
self.check_docstring_structure(class_node)
self.check_docstring_structure(node)
def check_newline_above_args(self, node, docstring):
"""Checks to ensure that there is a single space above the
argument parameters in the docstring.
Args:
node: astroid.node.Function. Node for a function or method
definition in the AST.
            docstring: list(str). Function docstring split by newlines.
"""
blank_line_counter = 0
for line in docstring:
line = line.strip()
if line == '':
blank_line_counter += 1
if blank_line_counter == 0 or blank_line_counter > 1:
if line == 'Args:':
self.add_message(
'single-space-above-args', node=node)
elif line == 'Returns:':
self.add_message(
'single-space-above-returns', node=node)
elif line == 'Raises:':
self.add_message(
'single-space-above-raises', node=node)
elif line == 'Yields:':
self.add_message(
'single-space-above-yield', node=node)
if line != '':
blank_line_counter = 0
def check_docstring_structure(self, node):
"""Checks whether the docstring has the correct structure i.e.
do not have space at the beginning and have a period at the end of
docstring.
Args:
node: astroid.scoped_nodes.Function. Node for a function or
method definition in the AST.
"""
if node.doc:
docstring = node.doc.splitlines()
# Check for space after """ in docstring.
if len(docstring[0]) > 0 and docstring[0][0] == ' ':
self.add_message('space-after-triple-quote', node=node)
# Check if single line docstring span two lines.
if len(docstring) == 2 and docstring[-1].strip() == '':
self.add_message(
'single-line-docstring-span-two-lines', node=node)
# Check for punctuation at end of a single line docstring.
elif (len(docstring) == 1 and docstring[-1][-1] not in
ALLOWED_TERMINATING_PUNCTUATIONS):
self.add_message('no-period-used', node=node)
# Check for punctuation at the end of a multiline docstring.
elif len(docstring) > 1:
if docstring[-2].strip() == '':
self.add_message('empty-line-before-end', node=node)
elif docstring[-1].strip() != '':
self.add_message(
'no-newline-used-at-end', node=node)
elif (docstring[-2][-1] not in
ALLOWED_TERMINATING_PUNCTUATIONS and not
any(word in docstring[-2] for word in EXCLUDED_PHRASES)):
self.add_message('no-period-used', node=node)
def check_docstring_section_indentation(self, node):
"""Checks whether the function argument definitions ("Args": section,
"Returns": section, "Yield": section, "Raises: section) are indented
properly. Parameters should be indented by 4 relative to the 'Args:'
'Return:', 'Raises:', 'Yield:' line and any wrap-around descriptions
should be indented by 8.
Args:
node: astroid.scoped_nodes.Function. Node for a function or
method definition in the AST.
"""
arguments_node = node.args
expected_argument_names = set(
None if (arg.name in self.not_needed_param_in_docstring)
else (arg.name + ':') for arg in arguments_node.args)
currently_in_args_section = False
# When we are in the args section and a line ends in a colon,
# we can ignore the indentation styling in the next section of
# description, hence a freeform section.
currently_in_freeform_section = False
args_indentation = 0
if node.doc:
current_docstring_section = None
in_description = False
args_indentation_in_spaces = 0
docstring = node.doc.splitlines()
self.check_newline_above_args(node, docstring)
for line in docstring:
stripped_line = line.lstrip()
current_line_indentation = (
len(line) - len(stripped_line))
parameter = re.search(
'^[^:]+:',
stripped_line)
# Check for empty lines and ignore them.
if len(line.strip()) == 0:
continue
# If line starts with Returns: , it is the header of a Returns
# subsection.
if stripped_line.startswith('Returns:'):
current_docstring_section = (
self.DOCSTRING_SECTION_RETURNS)
in_freeform_section = False
in_description = False
args_indentation_in_spaces = current_line_indentation
# If line starts with Raises: , it is the header of a Raises
# subsection.
elif stripped_line.startswith('Raises:'):
current_docstring_section = (
self.DOCSTRING_SECTION_RAISES)
in_freeform_section = False
in_description = False
args_indentation_in_spaces = current_line_indentation
# If line starts with Yields: , it is the header of a Yields
# subsection.
elif stripped_line.startswith('Yields:'):
current_docstring_section = (
self.DOCSTRING_SECTION_YIELDS)
in_freeform_section = False
in_description = False
args_indentation_in_spaces = current_line_indentation
# Check if we are in a docstring raises section.
elif (current_docstring_section and
(current_docstring_section ==
self.DOCSTRING_SECTION_RAISES)):
# In the raises section, if we see this regex expression, we
# can assume it's the start of a new parameter definition.
# We check the indentation of the parameter definition.
if re.search(r'^[a-zA-Z0-9_\.\*]+[.] ',
stripped_line):
if current_line_indentation != (
args_indentation_in_spaces + 4):
self.add_message(
'4-space-indentation-in-docstring',
node=node)
in_description = True
# In a description line that is wrapped around (doesn't
# start off with the parameter name), we need to make sure
# the indentation is 8.
elif in_description:
if current_line_indentation != (
args_indentation_in_spaces + 8):
self.add_message(
'8-space-indentation-in-docstring',
node=node)
# Check if we are in a docstring returns or yields section.
# NOTE: Each function should only have one yield or return
# object. If a tuple is returned, wrap both in a tuple parameter
# section.
elif (current_docstring_section and
(current_docstring_section ==
self.DOCSTRING_SECTION_RETURNS)
or (current_docstring_section ==
self.DOCSTRING_SECTION_YIELDS)):
# Check for the start of a new parameter definition in the
# format "type (elaboration)." and check the indentation.
if (re.search(r'^[a-zA-Z_() -:,\*]+\.',
stripped_line) and not in_description):
if current_line_indentation != (
args_indentation_in_spaces + 4):
self.add_message(
'4-space-indentation-in-docstring',
node=node)
# If the line ends with a colon, we can assume the rest
# of the section is free form.
if re.search(r':$', stripped_line):
in_freeform_section = True
in_description = True
# In a description line of a returns or yields, we keep the
# indentation the same as the definition line.
elif in_description:
if (current_line_indentation != (
args_indentation_in_spaces + 4)
and not in_freeform_section):
self.add_message(
'4-space-indentation-in-docstring',
node=node)
# If the description line ends with a colon, we can
# assume the rest of the section is free form.
if re.search(r':$', stripped_line):
in_freeform_section = True
# Check for the start of an Args: section and check the correct
# indentation.
elif stripped_line.startswith('Args:'):
args_indentation = current_line_indentation
# The current args indentation is incorrect.
if current_line_indentation % 4 != 0:
self.add_message(
'incorrect-indentation-for-arg-header-doc',
node=node)
# Since other checks are based on relative indentation,
# we need to fix this indentation first.
break
currently_in_args_section = True
                # Check for a parameter section header by checking that the
                # parameter is in the function arguments set. We also check
                # for arguments that start with *, which are variable
                # arguments that do not appear in the node args list, so we
                # handle those separately.
elif (currently_in_args_section and
parameter and ((
parameter.group(0).strip('*')
in expected_argument_names) or
re.search(r'\*[^ ]+: ', stripped_line))):
words_in_line = stripped_line.split(' ')
currently_in_freeform_section = False
# Check if the current parameter section indentation is
# correct.
if current_line_indentation != (
args_indentation + 4):
# Use the first word in the line to identify the error.
beginning_of_line = (
words_in_line[0]
if words_in_line else None)
self.add_message(
'4-space-indentation-for-arg-parameters-doc',
node=node,
args=(beginning_of_line))
# If the line ends with a colon, that means
# the next subsection of description is free form.
if line.endswith(':'):
currently_in_freeform_section = True
# All other lines can be treated as description.
elif currently_in_args_section:
# If it is not a freeform section, we check the indentation.
words_in_line = stripped_line.split(' ')
if (not currently_in_freeform_section
and current_line_indentation != (
args_indentation + 8)):
# Use the first word in the line to identify the error.
beginning_of_line = (
words_in_line[0]
if words_in_line else None)
self.add_message(
'8-space-indentation-for-arg-in-descriptions-doc',
node=node,
args=(beginning_of_line))
# If the line ends with a colon, that
# means the next subsection of description is free form.
if line.endswith(':'):
currently_in_freeform_section = True
def check_functiondef_returns(self, node, node_doc):
"""Checks whether a function documented with a return value actually has
a return statement in its definition.
Args:
node: astroid.scoped_nodes.Function. Node for a function or
method definition in the AST.
node_doc: Docstring. Pylint Docstring class instance representing
a node's docstring.
"""
if not node_doc.supports_yields and node.is_generator():
return
return_nodes = node.nodes_of_class(astroid.Return)
if ((
node_doc.has_returns() or node_doc.has_rtype()) and
not any(
docstrings_checker.returns_something(
ret_node) for ret_node in return_nodes)):
self.add_message(
'redundant-returns-doc',
node=node)
def check_functiondef_yields(self, node, node_doc):
"""Checks whether a function documented with a yield value actually has
a yield statement in its definition.
Args:
node: astroid.scoped_nodes.Function. Node for a function or
method definition in the AST.
node_doc: Docstring. Pylint Docstring class instance representing
a node's docstring.
"""
if not node_doc.supports_yields:
return
if ((node_doc.has_yields() or node_doc.has_yields_type()) and
not node.is_generator()):
self.add_message(
'redundant-yields-doc',
node=node)
def visit_raise(self, node):
"""Visits a function node that raises an exception and verifies that all
exceptions raised in the function definition are documented.
Args:
node: astroid.scoped_nodes.Function. Node for a function or
method definition in the AST.
"""
func_node = node.frame()
if not isinstance(func_node, astroid.FunctionDef):
return
expected_excs = docstrings_checker.possible_exc_types(node)
if not expected_excs:
return
if not func_node.doc:
# If this is a property setter,
# the property should have the docstring instead.
setters_property = docstrings_checker.get_setters_property(
func_node)
if setters_property:
func_node = setters_property
doc = docstrings_checker.docstringify(func_node.doc)
if not doc.is_valid():
if doc.doc:
self._handle_no_raise_doc(expected_excs, func_node)
return
found_excs = doc.exceptions()
missing_excs = expected_excs - found_excs
self._add_raise_message(missing_excs, func_node)
def visit_return(self, node):
"""Visits a function node that contains a return statement and verifies
that the return value and the return type are documented.
Args:
node: astroid.scoped_nodes.Function. Node for a function or
method definition in the AST.
"""
if not docstrings_checker.returns_something(node):
return
func_node = node.frame()
doc = docstrings_checker.docstringify(func_node.doc)
if not doc.is_valid() and self.config.accept_no_return_doc:
return
is_property = checker_utils.decorated_with_property(func_node)
if not (doc.has_returns() or
(doc.has_property_returns() and is_property)):
self.add_message(
'missing-return-doc',
node=func_node
)
if not (doc.has_rtype() or
(doc.has_property_type() and is_property)):
self.add_message(
'missing-return-type-doc',
node=func_node
)
def visit_yield(self, node):
"""Visits a function node that contains a yield statement and verifies
that the yield value and the yield type are documented.
Args:
node: astroid.scoped_nodes.Function. Node for a function or
method definition in the AST.
"""
func_node = node.frame()
doc = docstrings_checker.docstringify(func_node.doc)
if not doc.is_valid() and self.config.accept_no_yields_doc:
return
doc_has_yields = doc.has_yields()
doc_has_yields_type = doc.has_yields_type()
if not doc_has_yields:
self.add_message(
'missing-yield-doc',
node=func_node
)
if not doc_has_yields_type:
self.add_message(
'missing-yield-type-doc',
node=func_node
)
def visit_yieldfrom(self, node):
"""Visits a function node that contains a yield from statement and
verifies that the yield from value and the yield from type are
documented.
Args:
node: astroid.scoped_nodes.Function. Node to access module content.
"""
self.visit_yield(node)
def check_arguments_in_docstring(
self, doc, arguments_node, warning_node, accept_no_param_doc=None):
"""Check that all parameters in a function, method or class constructor
on the one hand and the parameters mentioned in the parameter
documentation (e.g. the Sphinx tags 'param' and 'type') on the other
hand are consistent with each other.
* Undocumented parameters except 'self' are noticed.
* Undocumented parameter types except for 'self' and the ``*<args>``
and ``**<kwargs>`` parameters are noticed.
* Parameters mentioned in the parameter documentation that don't or no
longer exist in the function parameter list are noticed.
* If the text "For the parameters, see" or "For the other parameters,
see" (ignoring additional whitespace) is mentioned in the docstring,
missing parameter documentation is tolerated.
* If there's no Sphinx style, Google style or NumPy style parameter
documentation at all, i.e. ``:param`` is never mentioned etc., the
checker assumes that the parameters are documented in another format
and the absence is tolerated.
Args:
doc: str. Docstring for the function, method or class.
arguments_node: astroid.scoped_nodes.Arguments. Arguments node
for the function, method or class constructor.
warning_node: astroid.scoped_nodes.Node. The node to assign
the warnings to.
accept_no_param_doc: bool|None. Whether or not to allow
no parameters to be documented. If None then
this value is read from the configuration.
"""
# Tolerate missing param or type declarations if there is a link to
# another method carrying the same name.
if not doc.doc:
return
if accept_no_param_doc is None:
accept_no_param_doc = self.config.accept_no_param_doc
tolerate_missing_params = doc.params_documented_elsewhere()
# Collect the function arguments.
expected_argument_names = set(
arg.name for arg in arguments_node.args)
expected_argument_names.update(
arg.name for arg in arguments_node.kwonlyargs)
not_needed_type_in_docstring = (
self.not_needed_param_in_docstring.copy())
if arguments_node.vararg is not None:
expected_argument_names.add(arguments_node.vararg)
not_needed_type_in_docstring.add(arguments_node.vararg)
if arguments_node.kwarg is not None:
expected_argument_names.add(arguments_node.kwarg)
not_needed_type_in_docstring.add(arguments_node.kwarg)
params_with_doc, params_with_type = doc.match_param_docs()
# Tolerate no parameter documentation at all.
if (not params_with_doc and not params_with_type
and accept_no_param_doc):
tolerate_missing_params = True
def _compare_missing_args(
found_argument_names, message_id, not_needed_names):
"""Compare the found argument names with the expected ones and
generate a message if there are arguments missing.
Args:
found_argument_names: set. Argument names found in the
docstring.
message_id: str. Pylint message id.
not_needed_names: set(str). Names that may be omitted.
"""
if not tolerate_missing_params:
missing_argument_names = (
(expected_argument_names - found_argument_names)
- not_needed_names)
if missing_argument_names:
self.add_message(
message_id,
args=(', '.join(
sorted(missing_argument_names)),),
node=warning_node)
def _compare_different_args(
found_argument_names, message_id, not_needed_names):
"""Compare the found argument names with the expected ones and
generate a message if there are extra arguments found.
Args:
found_argument_names: set. Argument names found in the
docstring.
message_id: str. Pylint message id.
not_needed_names: set(str). Names that may be omitted.
"""
differing_argument_names = (
(expected_argument_names ^ found_argument_names)
- not_needed_names - expected_argument_names)
if differing_argument_names:
self.add_message(
message_id,
args=(', '.join(
sorted(differing_argument_names)),),
node=warning_node)
_compare_missing_args(
params_with_doc, 'missing-param-doc',
self.not_needed_param_in_docstring)
_compare_missing_args(
params_with_type, 'missing-type-doc', not_needed_type_in_docstring)
_compare_different_args(
params_with_doc, 'differing-param-doc',
self.not_needed_param_in_docstring)
_compare_different_args(
params_with_type, 'differing-type-doc',
not_needed_type_in_docstring)
def check_single_constructor_params(self, class_doc, init_doc, class_node):
"""Checks whether a class and corresponding init() method are
documented. If both of them are documented, it adds an error message.
Args:
class_doc: Docstring. Pylint docstring class instance representing
a class's docstring.
init_doc: Docstring. Pylint docstring class instance representing
a method's docstring, the method here is the constructor method
for the above class.
class_node: astroid.scoped_nodes.Function. Node for class definition
in AST.
"""
if class_doc.has_params() and init_doc.has_params():
self.add_message(
'multiple-constructor-doc',
args=(class_node.name,),
node=class_node)
def _handle_no_raise_doc(self, excs, node):
"""Checks whether the raised exception in a function has been
        documented, and adds a message otherwise.
Args:
excs: list(str). A list of exception types.
node: astroid.scoped_nodes.Function. Node to access module content.
"""
if self.config.accept_no_raise_doc:
return
self._add_raise_message(excs, node)
def _add_raise_message(self, missing_excs, node):
"""Adds a message on :param:`node` for the missing exception type.
Args:
missing_excs: list(Exception). A list of missing exception types.
            node: astroid.node_classes.NodeNG. The node to show the message on.
"""
if not missing_excs:
return
self.add_message(
'missing-raises-doc',
args=(', '.join(sorted(missing_excs)),),
node=node)
class ImportOnlyModulesChecker(checkers.BaseChecker):
"""Checker for import-from statements. It checks that
modules are only imported.
"""
__implements__ = interfaces.IAstroidChecker
name = 'import-only-modules'
priority = -1
msgs = {
'C0003': (
'Import \"%s\" from \"%s\" is not a module.',
'import-only-modules',
            'Only modules should be imported.',
),
}
# If import from any of these is made, it may not be a module.
EXCLUDED_IMPORT_MODULES = [
'__future__',
'typing',
'mypy_imports',
'typing_extensions'
]
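    # Illustrative example (hypothetical names): an import such as
    # `from core.domain import exp_domain` is acceptable because exp_domain
    # is a module, whereas `from core.domain.exp_domain import Exploration`
    # would be reported because Exploration is a class, not a module.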
@checker_utils.check_messages('import-only-modules')
def visit_importfrom(self, node):
"""Visits all import-from statements in a python file and checks that
modules are imported. It then adds a message accordingly.
Args:
            node: astroid.node_classes.ImportFrom. Node for an import-from
statement in the AST.
"""
try:
imported_module = node.do_import_module(node.modname)
except astroid.AstroidBuildingException:
return
if node.modname in self.EXCLUDED_IMPORT_MODULES:
return
modname = node.modname
for (name, _) in node.names:
try:
imported_module.import_module(name, True)
except astroid.AstroidImportError:
self.add_message(
'import-only-modules',
node=node,
args=(name, modname),
)
class BackslashContinuationChecker(checkers.BaseChecker):
"""Custom pylint checker which checks that backslash is not used
for continuation.
"""
__implements__ = interfaces.IRawChecker
name = 'backslash-continuation'
priority = -1
msgs = {
'C0004': (
(
'Backslash should not be used to break continuation lines. '
'Use braces to break long lines.'),
'backslash-continuation',
'Use braces to break long lines instead of backslash.'
),
}
def process_module(self, node):
"""Process a module.
Args:
node: astroid.scoped_nodes.Function. Node to access module content.
"""
file_content = read_from_node(node)
for (line_num, line) in enumerate(file_content):
if line.rstrip('\r\n').endswith('\\'):
self.add_message(
'backslash-continuation', line=line_num + 1)
class FunctionArgsOrderChecker(checkers.BaseChecker):
"""Custom pylint checker which checks the order of arguments in function
definition.
"""
__implements__ = interfaces.IAstroidChecker
name = 'function-args-order'
priority = -1
msgs = {
'C0005': (
'Wrong order of arguments in function definition '
'\'self\' should come first.',
'function-args-order-self',
'\'self\' should come first',),
'C0006': (
'Wrong order of arguments in function definition '
'\'cls\' should come first.',
'function-args-order-cls',
'\'cls\' should come first'),
}
def visit_functiondef(self, node):
"""Visits every function definition in the python file and check the
function arguments order. It then adds a message accordingly.
Args:
node: astroid.scoped_nodes.Function. Node for a function or method
definition in the AST.
"""
args_list = [args.name for args in node.args.args]
if 'self' in args_list and args_list[0] != 'self':
self.add_message('function-args-order-self', node=node)
elif 'cls' in args_list and args_list[0] != 'cls':
self.add_message('function-args-order-cls', node=node)
class RestrictedImportChecker(checkers.BaseChecker):
"""Custom pylint checker which checks layers importing modules
from their respective restricted layers.
"""
__implements__ = interfaces.IAstroidChecker
name = 'invalid-import'
priority = -1
msgs = {
'C0009': (
'Importing %s layer in %s layer is prohibited.',
'invalid-import',
            'Storage layer and domain layer must not import '
'domain layer and controller layer respectively.'),
}
options = (
(
'forbidden-imports',
{
'default': [],
'type': 'csv',
'metavar': '<comma separated list>',
'help': (
'List of disallowed imports. The items start with '
'the module name where the imports are forbidden, the path '
'needs to be absolute with the root module name included '
'(e.g. \'oppia.core.domain\'), then comes '
'the \':\' separator, and after that a list of the imports '
'that are forbidden separated by \'|\', these imports are '
'relative to the root module (e.g. \'core.domain\').'
)
}
),
)
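    # Illustrative config entry (hypothetical module names), following the
    # format described in the option help text above:
    #     forbidden-imports=oppia.core.storage:core.domain|core.controllers
    # This would report any import of core.domain or core.controllers made
    # from modules under oppia.core.storage.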
def __init__(self, linter=None):
super(RestrictedImportChecker, self).__init__(linter=linter)
self._module_to_forbidden_imports = []
def open(self):
"""Parse the forbidden imports."""
splitted_module_to_forbidden_imports = [
forbidden_import.strip().split(':')
for forbidden_import in self.config.forbidden_imports
]
self._module_to_forbidden_imports = list(
(
forbidden_imports[0].strip(),
[import_.strip() for import_ in forbidden_imports[1].split('|')]
) for forbidden_imports in splitted_module_to_forbidden_imports
)
def _iterate_forbidden_imports(self, node):
"""Yields pairs of module name and forbidden imports.
Args:
            node: astroid.node_classes.Import. Node for an import statement
in the AST.
Yields:
tuple(str, str). Yields pair of module name and forbidden import.
"""
modnode = node.root()
for module_name, forbidden_imports in self._module_to_forbidden_imports:
for forbidden_import in forbidden_imports:
if module_name in modnode.name and not '_test' in modnode.name:
yield module_name, forbidden_import
def _add_invalid_import_message(self, node, module_name, forbidden_import):
"""Adds pylint message about the invalid import.
Args:
            node: astroid.node_classes.Import. Node for an import statement
in the AST.
module_name: str. The module that was checked.
forbidden_import: str. The import that was invalid.
"""
self.add_message(
'invalid-import',
node=node,
args=(
forbidden_import.split('.')[-1],
module_name.split('.')[-1]
),
)
def visit_import(self, node):
"""Visits every import statement in the file.
Args:
            node: astroid.node_classes.Import. Node for an import statement
in the AST.
"""
names = [name for name, _ in node.names]
for module_name, forbidden_import in self._iterate_forbidden_imports(
node):
if any(forbidden_import in name for name in names):
self._add_invalid_import_message(
node, module_name, forbidden_import)
def visit_importfrom(self, node):
"""Visits all import-from statements in a python file and checks that
modules are imported. It then adds a message accordingly.
Args:
            node: astroid.node_classes.ImportFrom. Node for an import-from
statement in the AST.
"""
for module_name, forbidden_import in self._iterate_forbidden_imports(
node):
if forbidden_import in node.modname:
self._add_invalid_import_message(
node, module_name, forbidden_import)
class SingleCharAndNewlineAtEOFChecker(checkers.BaseChecker):
"""Checker for single character files and newline at EOF."""
__implements__ = interfaces.IRawChecker
name = 'newline-at-eof'
priority = -1
msgs = {
'C0007': (
'Files should end in a single newline character.',
'newline-at-eof',
'Please enter a single newline at the end of the file.'),
'C0008': (
'Only one character in file',
'only-one-character',
'Files with only one character are not allowed.'),
}
def process_module(self, node):
"""Process a module.
Args:
node: astroid.scoped_nodes.Function. Node to access module content.
"""
file_content = read_from_node(node)
file_length = len(file_content)
if file_length == 1 and len(file_content[0]) == 1:
self.add_message('only-one-character', line=file_length)
if file_length >= 2 and not re.search(r'[^\n]\n', file_content[-1]):
self.add_message('newline-at-eof', line=file_length)
class DivisionOperatorChecker(checkers.BaseChecker):
"""Checks if division operator is used."""
__implements__ = interfaces.IAstroidChecker
name = 'division-operator-used'
priority = -1
msgs = {
'C0015': (
'Please use python_utils.divide() instead of the "/" operator',
'division-operator-used',
'Do not use division operator.'
)
}
def visit_binop(self, node):
"""Visit assign statements to ensure that the division operator('/')
is not used and python_utils.divide() is used instead.
Args:
node: astroid.node.BinOp. Node to access module content.
"""
if node.op == '/':
self.add_message(
'division-operator-used', node=node)
class SingleLineCommentChecker(checkers.BaseChecker):
"""Checks if comments follow correct style."""
__implements__ = interfaces.ITokenChecker
name = 'incorrectly_styled_comment'
priority = -1
msgs = {
'C0016': (
'Invalid punctuation is used.',
'invalid-punctuation-used',
'Please use valid punctuation.'
),
'C0017': (
'Please use single space at beginning of comment.',
'no-space-at-beginning',
'Please use single space at the beginning of comment.'
),
'C0018': (
'Please use a capital letter at the beginning of comment.',
'no-capital-letter-at-beginning',
'Please use capital letter to begin the content of comment.'
),
'C0040': (
'This inline comment does not start with any allowed pragma. Please'
' put this comment in a new line.',
'no-allowed-inline-pragma',
'Inline comments should always start with an allowed inline pragma.'
)
}
options = ((
'allowed-comment-prefixes',
{
'default': ('int', 'str', 'float', 'bool', 'v'),
'type': 'csv', 'metavar': '<comma separated list>',
'help': 'List of allowed prefixes in a comment.'
}
),)
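    # Illustrative examples (hypothetical comments): '#comment' is reported
    # for the missing space, '# starts lowercase.' for the missing capital
    # letter, and '# Missing punctuation' for the missing terminating
    # punctuation; '# A well formed comment.' passes all three checks.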
def _check_space_at_beginning_of_comments(self, line, line_num):
"""Checks if the comment starts with a space.
Args:
line: str. The current line of comment.
line_num: int. Line number of the current comment.
"""
if re.search(r'^#[^\s].*$', line) and not line.startswith('#!'):
self.add_message(
'no-space-at-beginning', line=line_num)
def _check_comment_starts_with_capital_letter(self, line, line_num):
"""Checks if the comment starts with a capital letter.
Comments may include a lowercase character at the beginning only if they
start with version info or a data type or a variable name e.g.
"# next_line is of string type." or "# v2 version does not have
ExplorationStats Model." or "# int. The file size, in bytes.".
Args:
line: str. The current line of comment.
line_num: int. Line number of the current comment.
"""
# Check if variable name is used.
if line[1:].startswith(' '):
starts_with_underscore = '_' in line.split()[1]
else:
starts_with_underscore = '_' in line.split()[0]
# Check if allowed prefix is used.
allowed_prefix_is_present = any(
line[2:].startswith(word) for word in
self.config.allowed_comment_prefixes)
# Check if comment contains any excluded phrase.
excluded_phrase_is_present = any(
line[1:].strip().startswith(word) for word in EXCLUDED_PHRASES)
if (re.search(r'^# [a-z].*', line) and not (
excluded_phrase_is_present or
starts_with_underscore or allowed_prefix_is_present)):
self.add_message(
'no-capital-letter-at-beginning', line=line_num)
def _check_punctuation(self, line, line_num):
"""Checks if the comment starts with a correct punctuation.
Args:
line: str. The current line of comment.
line_num: int. Line number of the current comment.
"""
excluded_phrase_is_present_at_end = any(
word in line for word in EXCLUDED_PHRASES)
# Comments must end with the proper punctuation.
last_char_is_invalid = line[-1] not in (
ALLOWED_TERMINATING_PUNCTUATIONS)
excluded_phrase_at_beginning_of_line = any(
line[1:].startswith(word) for word in EXCLUDED_PHRASES)
if (last_char_is_invalid and not (
excluded_phrase_is_present_at_end or
excluded_phrase_at_beginning_of_line)):
self.add_message('invalid-punctuation-used', line=line_num)
def _check_trailing_comment_starts_with_allowed_pragma(
self, line, line_num):
"""Checks if the trailing inline comment starts with a valid and
allowed pragma.
Args:
line: str. The current line of comment.
line_num: int. Line number of the current comment.
"""
comment_start_index = -1
for pos, char in enumerate(line):
if char == '#':
comment_start_index = pos
line = line[comment_start_index:]
self._check_space_at_beginning_of_comments(line, line_num)
allowed_inline_pragma_present = any(
line[2:].startswith(word) for word in
ALLOWED_PRAGMAS_FOR_INLINE_COMMENTS
)
if allowed_inline_pragma_present:
return
self.add_message('no-allowed-inline-pragma', line=line_num)
def process_tokens(self, tokens):
"""Custom pylint checker to ensure that comments follow correct style.
Args:
tokens: list(Token). Object to access all tokens of a module.
"""
prev_line_num = -1
comments_group_list = []
comments_index = -1
for (token_type, _, (line_num, _), _, line) in tokens:
if token_type == tokenize.COMMENT:
line = line.strip()
if line.startswith('#'):
self._check_space_at_beginning_of_comments(line, line_num)
if prev_line_num + 1 == line_num:
comments_group_list[comments_index].append(
(line, line_num))
else:
comments_group_list.append([(line, line_num)])
comments_index += 1
prev_line_num = line_num
else:
self._check_trailing_comment_starts_with_allowed_pragma(
line, line_num)
for comments in comments_group_list:
# Checks first line of comment.
self._check_comment_starts_with_capital_letter(*comments[0])
# Checks last line of comment.
self._check_punctuation(*comments[-1])
class BlankLineBelowFileOverviewChecker(checkers.BaseChecker):
"""Checks if there is a single empty line below the fileoverview docstring.
Note: The check assumes that all files have a file overview. This
assumption is justified because Pylint has an inbuilt check
(missing-docstring) for missing file overviews.
"""
__implements__ = interfaces.IAstroidChecker
name = 'space_between_imports_and_file-overview'
priority = -1
msgs = {
'C0024': (
'Please add an empty line below the fileoverview docstring.',
'no-empty-line-provided-below-fileoverview',
            'Please provide an empty line below the fileoverview.'
),
'C0025': (
'Single empty line should be provided below the fileoverview.',
'only-a-single-empty-line-should-be-provided',
            'Please provide only a single empty line below the fileoverview.'
)
}
def visit_module(self, node):
"""Visit a module to ensure that there is a blank line below
file overview docstring.
Args:
node: astroid.scoped_nodes.Function. Node to access module content.
"""
# Check if the given node has docstring.
if node.doc is None:
return
line_number = node.fromlineno
# Iterate till the start of docstring.
while True:
line = linecache.getline(node.root().file, line_number).strip()
if line.startswith(('\'', '"')):
break
line_number += 1
doc_length = len(node.doc.split('\n'))
line_number += doc_length
first_line_after_doc = linecache.getline(
node.root().file, line_number).strip()
second_line_after_doc = linecache.getline(
node.root().file, line_number + 1).strip()
if first_line_after_doc != '':
self.add_message(
'no-empty-line-provided-below-fileoverview', node=node)
elif second_line_after_doc == '':
self.add_message(
'only-a-single-empty-line-should-be-provided', node=node)
class SingleLinePragmaChecker(checkers.BaseChecker):
"""Custom pylint checker which checks if pylint pragma is used to disable
a rule for a single line only.
"""
__implements__ = interfaces.ITokenChecker
name = 'single-line-pragma'
priority = -1
msgs = {
'C0028': (
'Pylint pragmas should be used to disable a rule '
'for a single line only',
'single-line-pragma',
'Please use pylint pragmas to disable a rule for a single line only'
)
}
def process_tokens(self, tokens):
"""Custom pylint checker which allows paramas to disable a rule for a
single line only.
Args:
tokens: Token. Object to access all tokens of a module.
"""
for (token_type, _, (line_num, _), _, line) in tokens:
if token_type == tokenize.COMMENT:
line = line.lstrip()
# Ignore line that is enabling this check.
# Example:
# # pylint: disable=import-only-modules, single-line-pragma
# def func(a, b):
# # pylint: enable=import-only-modules, single-line-pragma
# If we do not ignore the line with the 'enable' statement,
# pylint will raise the single-line-pragma error, because this
# lint check is enabled from that line onwards. So we need to
# ignore this line.
if re.search(r'^(#\s*pylint:)', line):
if 'enable' in line and 'single-line-pragma' in line:
continue
self.add_message(
'single-line-pragma', line=line_num)
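# Illustrative sketch (not part of the original Oppia source) of pragma usage
# this checker accepts and rejects:
#
#   x = undefined_name  # pylint: disable=undefined-variable   (ok: trailing pragma
#                                                                affects one line only)
#   # pylint: disable=undefined-variable                        (flagged: standalone
#   x = undefined_name                                           pragma disables the
#                                                                rule for a block)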
class SingleSpaceAfterKeyWordChecker(checkers.BaseChecker):
"""Custom pylint checker which checks that there is a single space
after keywords like `if`, `elif`, `while`, and `yield`.
"""
__implements__ = interfaces.ITokenChecker
name = 'single-space-after-keyword'
priority = -1
msgs = {
'C0029': (
'Please add a single space after `%s` statement.',
'single-space-after-keyword',
'A single space should be added after a keyword.',
),
}
keywords = set(['if', 'elif', 'while', 'yield'])
def process_tokens(self, tokens):
"""Custom pylint checker which makes sure that every keyword is
followed by a single space.
Args:
tokens: Token. Object to access all tokens of a module.
"""
for (token_type, token, (line_num, _), _, line) in tokens:
if token_type == tokenize.NAME and token in self.keywords:
line = line.strip()
# The regex matches when the keyword is followed by exactly one
# space and then a non-space character, or appears at the end of
# the line.
if not re.search(r'(\s|^)' + token + r'(\s[^\s]|$)', line):
self.add_message(
'single-space-after-keyword',
args=(token),
line=line_num)
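# Illustrative sketch (not part of the original Oppia source) of keyword
# spacing this checker accepts and rejects:
#
#   if(value):        <-- flagged: no space after `if`
#   if  value:        <-- flagged: more than one space after `if`
#   if value:         <-- accepted: exactly one space after `if`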
class InequalityWithNoneChecker(checkers.BaseChecker):
"""Custom pylint checker prohibiting use of "if x != None" and
enforcing use of "if x is not None" instead.
"""
__implements__ = interfaces.IAstroidChecker
name = 'inequality-with-none'
priority = -1
msgs = {
'C0030': (
'Please refrain from using "x != None" '
'and use "x is not None" instead.',
'inequality-with-none',
'Use "is" to assert equality or inequality against None.'
)
}
def visit_compare(self, node):
"""Called for comparisons (a != b).
Args:
node: astroid.node.Compare. A node indicating comparison.
"""
ops = node.ops
for operator, operand in ops:
if operator != '!=':
continue
# Check if value field is in operand node, since
# not all righthand side nodes will have this field.
if 'value' in vars(operand) and operand.value is None:
self.add_message('inequality-with-none', node=node)
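# Illustrative sketch (not part of the original Oppia source):
#
#   if value != None:        <-- flagged: C0030 (inequality-with-none)
#   if value is not None:    <-- accepted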
class NonTestFilesFunctionNameChecker(checkers.BaseChecker):
"""Custom pylint checker prohibiting use of "test_only" prefix in function
names of non-test files.
"""
__implements__ = interfaces.IAstroidChecker
name = 'non-test-files-function-name-checker'
priority = -1
msgs = {
'C0031': (
'Please change the name of the function so that it does not use '
'"test_only" as its prefix in non-test files.',
'non-test-files-function-name-checker',
'Prohibit use of "test_only" prefix in function names of non-test '
'files.'
)
}
def visit_functiondef(self, node):
"""Visit every function definition and ensure their name doesn't have
test_only as its prefix.
Args:
node: astroid.nodes.FunctionDef. A node for a function or method
definition in the AST.
"""
modnode = node.root()
if modnode.name.endswith('_test'):
return
function_name = node.name
if function_name.startswith('test_only'):
self.add_message(
'non-test-files-function-name-checker', node=node)
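# Illustrative sketch (not part of the original Oppia source), assuming a
# hypothetical non-test module exp_services.py:
#
#   def test_only_reset_registry():    <-- flagged: 'test_only' prefix outside
#       ...                                 a *_test module
#
#   def reset_registry():              <-- accepted
#       ...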
class DisallowedFunctionsChecker(checkers.BaseChecker):
"""Custom pylint checker for language specific general purpose
regex checks of functions calls to be removed or replaced.
"""
__implements__ = interfaces.IAstroidChecker
name = 'disallowed-function-calls'
priority = -1
msgs = {
'C0032': (
'Please remove the call to %s.',
'remove-disallowed-function-calls',
(
'Disallows usage of black-listed functions that '
'should be removed.'),
),
'C0033': (
'Please replace the call to %s with %s.',
'replace-disallowed-function-calls',
(
'Disallows usage of black-listed functions that '
'should be replaced by allowed alternatives.'),
),
}
options = (
(
'disallowed-functions-and-replacements-str',
{
'default': (),
'type': 'csv',
'metavar': '<comma separated list>',
'help': (
'List of strings of disallowed function names. '
'Strings should be either in the format (1) "A=>B", '
'where A is the disallowed function and B is the '
'replacement, or (2) in the format "A", which signifies '
'that A should just be removed.')
},
),
(
'disallowed-functions-and-replacements-regex',
{
'default': (),
'type': 'csv',
'metavar': '<comma separated list>',
'help': (
'List of strings of regex to find disallowed function '
'names. Strings should be either in the format "A=>B", '
'where A is a regex for the disallowed function and B '
'is the replacement or in the format "A", which '
'signifies that A should just be removed. '
'An example regex entry is: ".*func=>other", which '
'suggests "somefunc" be replaced by "other".')
},
),)
def __init__(self, linter=None):
super(DisallowedFunctionsChecker, self).__init__(linter=linter)
self.funcs_to_replace_str = {}
self.funcs_to_remove_str = set()
self.funcs_to_replace_regex = []
self.funcs_to_remove_regex = None
def open(self):
self._populate_disallowed_functions_and_replacements_str()
self._populate_disallowed_functions_and_replacements_regex()
def _populate_disallowed_functions_and_replacements_str(self):
"""Parse pylint config entries for replacements of disallowed
functions represented by strings.
"""
for entry in self.config.disallowed_functions_and_replacements_str:
splits = [s.strip() for s in entry.split('=>')]
assert len(splits) in (1, 2)
if len(splits) == 1:
self.funcs_to_remove_str.add(splits[0])
else:
self.funcs_to_replace_str[splits[0]] = splits[1]
def _populate_disallowed_functions_and_replacements_regex(self):
"""Parse pylint config entries for replacements of disallowed
functions represented by regex.
"""
remove_regexes = []
for entry in self.config.disallowed_functions_and_replacements_regex:
splits = [s.strip() for s in entry.split('=>')]
assert len(splits) in (1, 2)
if len(splits) == 1:
remove_regexes.append(splits[0])
else:
rgx = re.compile(r'{}'.format(splits[0]))
self.funcs_to_replace_regex.append((rgx, splits[1]))
# Store removal regexes as one large regex, concatenated by "|".
if len(remove_regexes) > 0:
self.funcs_to_remove_regex = (
re.compile(r'{}'.format('|'.join(remove_regexes))))
def visit_call(self, node):
"""Visit a function call to ensure that the call is
not using any disallowed functions.
Args:
node: astroid.nodes.Call. Node to access call content.
"""
func = node.func.as_string()
if func in self.funcs_to_replace_str:
self.add_message(
'replace-disallowed-function-calls',
node=node, args=(func, self.funcs_to_replace_str[func]))
elif (
func in self.funcs_to_remove_str
or (
self.funcs_to_remove_regex is not None
and self.funcs_to_remove_regex.match(func) is not None
)
):
self.add_message(
'remove-disallowed-function-calls',
node=node, args=func)
else:
# Search through list of replacement regexes entries
# (tuple(rgx, replacement)). If a match is found, return the
# corresponding replacement.
for rgx, replacement in self.funcs_to_replace_regex:
if rgx.match(func) is not None:
self.add_message(
'replace-disallowed-function-calls',
node=node, args=(func, replacement))
break
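# Illustrative sketch (not part of the original Oppia source) of how the two
# options above might be set in a pylintrc; the function names used here are
# hypothetical:
#
#   disallowed-functions-and-replacements-str=itertools.imap=>map,sys.exit
#   disallowed-functions-and-replacements-regex=.*sleep=>retry_with_backoff
#
# With such a configuration, a call to itertools.imap would be reported with
# map as the suggested replacement, a call to sys.exit would be reported for
# removal, and any call whose name matches ".*sleep" would be reported with
# retry_with_backoff as the suggested replacement.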
class DisallowHandlerWithoutSchema(checkers.BaseChecker):
"""Custom pylint checker prohibiting handlers which do not have schema
defined within the class.
"""
__implements__ = interfaces.IAstroidChecker
name = 'disallow-handlers-without-schema'
priority = -1
msgs = {
'C0035': (
'Please add schema in URL_PATH_ARGS_SCHEMAS for %s class. \nVisit '
'https://github.com/oppia/oppia/wiki/Writing-schema-for-'
'handler-args'
' to learn how to write schema for handlers.',
'no-schema-for-url-path-elements',
'Enforce writing schema for url path arguments of handler class.'
),
'C0036': (
'Please add schema in HANDLER_ARGS_SCHEMA for %s class. \nVisit '
'https://github.com/oppia/oppia/wiki/Writing-schema-for-'
'handler-args'
' to learn how to write schema for handlers.',
'no-schema-for-handler-args',
'Enforce writing schema for request arguments of handler class.'
),
'C0037': (
'URL_PATH_ARGS_SCHEMAS for %s class must be dict.',
'url-path-args-schemas-must-be-dict',
'Enforce URL_PATH_ARGS_SCHEMAS to be of dict type.'
),
'C0038': (
'HANDLER_ARGS_SCHEMAS for %s class must be dict.',
'handler-args-schemas-must-be-dict',
'Enforce HANDLER_ARGS_SCHEMAS to be of dict type.'
)
}
def check_given_variable_is_a_dict(self, node, variable_name):
"""Checks whether schema variable of a handlers class is of dict type.
Args:
node: astroid.nodes.ClassDef. Node for a class definition
in the AST.
variable_name: str. Name of the variable which contains schemas.
Returns:
bool. Whether schema variable of a class is of dict type.
"""
generator_object_for_value_of_schemas = (
node.locals[variable_name][0].assigned_stmts())
for value_of_schemas in generator_object_for_value_of_schemas:
if value_of_schemas.name != 'dict':
return False
return True
def check_parent_class_is_basehandler(self, node):
"""Checks whether the parent class of given class is BaseHandler.
Args:
node: astroid.nodes.ClassDef. Node for a class definition
in the AST.
Returns:
bool. Whether the parent class of given class is BaseHandler.
"""
for ancestor_node in node.ancestors():
if ancestor_node.name == u'BaseHandler':
return True
return False
def visit_classdef(self, node):
"""Visit each class definition in controllers layer module and check
if it contains schema or not.
Args:
node: astroid.nodes.ClassDef. Node for a class definition
in the AST.
"""
if not self.check_parent_class_is_basehandler(node):
return
if (
node.name in
handler_schema_constants.HANDLER_CLASS_NAMES_WITH_NO_SCHEMA
):
return
if 'URL_PATH_ARGS_SCHEMAS' not in node.locals:
self.add_message(
'no-schema-for-url-path-elements', node=node, args=(node.name))
elif not self.check_given_variable_is_a_dict(
node, 'URL_PATH_ARGS_SCHEMAS'):
self.add_message(
'url-path-args-schemas-must-be-dict',
node=node, args=(node.name))
if 'HANDLER_ARGS_SCHEMAS' not in node.locals:
self.add_message(
'no-schema-for-handler-args', node=node, args=(node.name))
elif not self.check_given_variable_is_a_dict(
node, 'HANDLER_ARGS_SCHEMAS'):
self.add_message(
'handler-args-schemas-must-be-dict',
node=node, args=(node.name))
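# Illustrative sketch (not part of the original Oppia source) of a handler
# that satisfies this checker. The class name and schema contents are
# hypothetical:
#
#   class SampleHandler(base.BaseHandler):
#       URL_PATH_ARGS_SCHEMAS = {
#           'exploration_id': {'schema': {'type': 'basestring'}}
#       }
#       HANDLER_ARGS_SCHEMAS = {'GET': {}}
#
# A BaseHandler subclass that omits either attribute, or defines it as
# something other than a dict, is reported by the messages above.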
class DisallowedImportsChecker(checkers.BaseChecker):
"""Check that disallowed imports are not made."""
__implements__ = interfaces.IAstroidChecker
name = 'disallowed-imports'
priority = -1
msgs = {
'C0039': (
'Please use str instead of Text',
'disallowed-text-import',
'Disallow import of Text from typing module',
),
}
def visit_importfrom(self, node):
"""Visits all import-from statements in a python file and ensures that
only allowed imports are made.
Args:
node: astroid.node_classes.ImportFrom. Node for a import-from
statement in the AST.
"""
if node.modname != 'typing':
return
for (name, _) in node.names:
if name == 'Text':
self.add_message('disallowed-text-import', node=node)
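# Illustrative sketch (not part of the original Oppia source):
#
#   from typing import Text      <-- flagged: C0039 (disallowed-text-import)
#   from typing import Optional  <-- accepted; use `str` instead of `Text`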
def register(linter):
"""Registers the checker with pylint.
Args:
linter: Pylinter. The Pylinter object.
"""
linter.register_checker(ExplicitKeywordArgsChecker(linter))
linter.register_checker(HangingIndentChecker(linter))
linter.register_checker(DocstringParameterChecker(linter))
linter.register_checker(ImportOnlyModulesChecker(linter))
linter.register_checker(BackslashContinuationChecker(linter))
linter.register_checker(FunctionArgsOrderChecker(linter))
linter.register_checker(RestrictedImportChecker(linter))
linter.register_checker(SingleCharAndNewlineAtEOFChecker(linter))
linter.register_checker(DivisionOperatorChecker(linter))
linter.register_checker(SingleLineCommentChecker(linter))
linter.register_checker(BlankLineBelowFileOverviewChecker(linter))
linter.register_checker(SingleLinePragmaChecker(linter))
linter.register_checker(SingleSpaceAfterKeyWordChecker(linter))
linter.register_checker(InequalityWithNoneChecker(linter))
linter.register_checker(NonTestFilesFunctionNameChecker(linter))
linter.register_checker(DisallowedFunctionsChecker(linter))
linter.register_checker(DisallowHandlerWithoutSchema(linter))
linter.register_checker(DisallowedImportsChecker(linter))
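# Illustrative sketch (not part of the original source): these checkers are
# typically enabled by pointing pylint at the module that defines register(),
# for example via a pylintrc entry or on the command line:
#
#   load-plugins=scripts.linters.pylint_extensions
#
#   python -m pylint --load-plugins=scripts.linters.pylint_extensions <targets>
#
# The exact invocation used by the project's presubmit scripts may differ.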
'''
@summary: Abstract Class for events.
'''
class Event(object):
"""
Event is the base class providing an interface for all subsequent
(inherited) events that will trigger further events in the
trading infrastructure.
"""
def __init__(self):
pass
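# Illustrative sketch (not part of the original source): a minimal concrete
# event built on this interface. The event type and attribute are
# hypothetical:
#
#   class MarketEvent(Event):
#       """Signals receipt of a new market update."""
#
#       def __init__(self):
#           self.type = 'MARKET'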
from __future__ import print_function
import sys
import os
from pyimpute import load_training_rasters, load_targets, impute
from pyimpute import stratified_sample_raster
from sklearn.ensemble import ExtraTreesClassifier
from sklearn import cross_validation
from collections import OrderedDict
import logging
logger = logging.getLogger('pyimpute')
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler(stream=sys.stdout)
logger.addHandler(sh)
TRAINING_DIR = "./_aez_data/training"
def main():
# Define the known data points or "training" data
explanatory_fields = "tmin12c tmax8c p_ph_c pmean_wntrc pmean_sumrc irr_lands gt_demc grwsnc d2u2c".split()
explanatory_rasters = [os.path.join(TRAINING_DIR, r, "hdr.adf") for r in explanatory_fields]
response_raster = os.path.join(TRAINING_DIR, 'iso_zns3-27/hdr.adf')
# Take a random stratified sample
selected = stratified_sample_raster(response_raster,
target_sample_size=20, min_sample_proportion=0.01)
# Load the training rasters using the sampled subset
train_xs, train_y = load_training_rasters(response_raster,
explanatory_rasters, selected)
print(train_xs.shape, train_y.shape)
# Train the classifier
clf = ExtraTreesClassifier(n_estimators=10, n_jobs=1)
clf.fit(train_xs, train_y)
print(clf)
# Cross validate
k = 5
scores = cross_validation.cross_val_score(clf, train_xs, train_y, cv=k)
print("%d-fold Cross Validation Accuracy: %0.2f (+/- %0.2f)" % (k, scores.mean() * 100, scores.std() * 200))
# ... Other model assessment
# Run the model on the current data; i.e. predict itself
print("Imputing response rasters FOR CURRENT DATA")
target_xs, raster_info = load_targets(explanatory_rasters)
impute(target_xs, clf, raster_info, outdir="_aez_output_current",
linechunk=400, class_prob=True, certainty=True)
sys.exit()
years = ['2070s']
for year in years:
print("Loading target explanatory raster data, swapping out for %s climate data" % year)
fdir = os.path.join(TRAINING_DIR, "../RCP85/%s/" % year)
# swap out datasets that are predicted to change over time (i.e the climate data only)
climate_rasters = "grwsnc pmean_sumrc pmean_wntrc tmax8c tmin12c".split()
new_explanatory_rasters = OrderedDict(zip(explanatory_fields, explanatory_rasters))
for cr in climate_rasters:
new_explanatory_rasters[cr] = fdir + cr + "/hdr.adf"
target_xs, raster_info = load_targets(new_explanatory_rasters.values())
print("Imputing response rasters")
impute(target_xs, clf, raster_info, outdir="_aez_output_%s" % year,
linechunk=40, class_prob=True, certainty=True)
if __name__ == '__main__':
main()
'''
Return salt data via slack
This version of the returner is designed to be used with hubblestack pulsar, as
it expects the limited data which pulsar provides.
The following fields can be set in the minion conf file::
.. code-block:: yaml
slack_pulsar.channel (required)
slack_pulsar.api_key (required)
slack_pulsar.username (required)
slack_pulsar.as_user (required to see the profile picture of your bot)
slack_pulsar.profile (optional)
Alternative configuration values can be used by prefacing the configuration.
Any values not found in the alternative configuration will be pulled from
the default location:
.. code-block:: yaml
slack_pulsar.channel
slack_pulsar.api_key
slack_pulsar.username
slack_pulsar.as_user
Slack settings may also be configured as:
.. code-block:: yaml
slack:
channel: RoomName
api_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
username: user
as_user: true
alternative.slack:
room_id: RoomName
api_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
from_name: user@email.com
slack_profile:
api_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
from_name: user@email.com
slack:
profile: slack_profile
channel: RoomName
alternative.slack:
profile: slack_profile
channel: RoomName
'''
from __future__ import absolute_import
# Import Python libs
import pprint
import logging
import urllib
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext.six.moves.urllib.parse import urljoin as _urljoin # pylint: disable=import-error,no-name-in-module
import salt.ext.six.moves.http_client
# pylint: enable=import-error,no-name-in-module,redefined-builtin
# Import Salt Libs
import salt.returners
log = logging.getLogger(__name__)
__virtualname__ = 'slack_pulsar'
def _get_options(ret=None):
'''
Get the slack options from salt.
'''
defaults = {'channel': '#general'}
attrs = {'slack_profile': 'profile',
'channel': 'channel',
'username': 'username',
'as_user': 'as_user',
'api_key': 'api_key',
}
profile_attr = 'slack_profile'
profile_attrs = {'from_jid': 'from_jid',
'api_key': 'api_key',
'api_version': 'api_key'
}
_options = salt.returners.get_returner_options(__virtualname__,
ret,
attrs,
profile_attr=profile_attr,
profile_attrs=profile_attrs,
__salt__=__salt__,
__opts__=__opts__,
defaults=defaults)
return _options
def __virtual__():
'''
Return virtual name of the module.
:return: The virtual name of the module.
'''
return __virtualname__
def _query(function,
api_key=None,
args=None,
method='GET',
header_dict=None,
data=None):
'''
Slack object method function to construct and execute on the API URL.
:param api_key: The Slack api key.
:param function: The Slack api function to perform.
:param method: The HTTP method, e.g. GET or POST.
:param data: The data to be sent for POST method.
:return: The json response from the API call or False.
'''
query_params = {}
ret = {'message': '',
'res': True}
slack_functions = {
'rooms': {
'request': 'channels.list',
'response': 'channels',
},
'users': {
'request': 'users.list',
'response': 'members',
},
'message': {
'request': 'chat.postMessage',
'response': 'channel',
},
}
if not api_key:
try:
options = __salt__['config.option']('slack')
if not api_key:
api_key = options.get('api_key')
except (NameError, KeyError, AttributeError):
log.error('No Slack api key found.')
ret['message'] = 'No Slack api key found.'
ret['res'] = False
return ret
api_url = 'https://slack.com'
base_url = _urljoin(api_url, '/api/')
path = slack_functions.get(function).get('request')
url = _urljoin(base_url, path, False)
if not isinstance(args, dict):
query_params = {}
query_params['token'] = api_key
if header_dict is None:
header_dict = {}
if method != 'POST':
header_dict['Accept'] = 'application/json'
result = salt.utils.http.query(
url,
method,
params=query_params,
data=data,
decode=True,
status=True,
header_dict=header_dict,
opts=__opts__,
)
if result.get('status', None) == salt.ext.six.moves.http_client.OK:
_result = result['dict']
response = slack_functions.get(function).get('response')
if 'error' in _result:
ret['message'] = _result['error']
ret['res'] = False
return ret
ret['message'] = _result.get(response)
return ret
elif result.get('status', None) == salt.ext.six.moves.http_client.NO_CONTENT:
return True
else:
log.debug(url)
log.debug(query_params)
log.debug(data)
log.debug(result)
_result = result['dict']
# Look up the expected response key here as well, since this branch
# does not go through the success path above.
response = slack_functions.get(function).get('response')
if 'error' in _result:
ret['message'] = _result['error']
ret['res'] = False
return ret
ret['message'] = _result.get(response)
return ret
def _post_message(channel,
message,
username,
as_user,
api_key=None):
'''
Send a message to a Slack room.
:param channel: The room name.
:param message: The message to send to the Slack room.
:param username: Specify who the message is from.
:param as_user: Sets the profile picture which have been added through Slack itself.
:param api_key: The Slack api key, if not specified in the configuration.
:param api_version: The Slack api version, if not specified in the configuration.
:return: Boolean if message was sent successfully.
'''
parameters = dict()
parameters['channel'] = channel
parameters['username'] = username
parameters['as_user'] = as_user
parameters['text'] = '```' + message + '```' # pre-formatted, fixed-width text
# Slack wants the body on POST to be urlencoded.
result = _query(function='message',
api_key=api_key,
method='POST',
header_dict={'Content-Type': 'application/x-www-form-urlencoded'},
data=urllib.urlencode(parameters))
log.debug('result {0}'.format(result))
if result:
return True
else:
return False
def returner(ret):
'''
Send a Slack message with the given data.
'''
_options = _get_options(ret)
channel = _options.get('channel')
username = _options.get('username')
as_user = _options.get('as_user')
api_key = _options.get('api_key')
if not channel:
log.error('slack_pulsar.channel not defined in salt config')
return
if not username:
log.error('slack_pulsar.username not defined in salt config')
return
if not as_user:
log.error('slack_pulsar.as_user not defined in salt config')
return
if not api_key:
log.error('slack_pulsar.api_key not defined in salt config')
return
if ret and isinstance(ret, dict):
message = ('id: {0}\r\n'
'return: {1}\r\n').format(__opts__['id'],
pprint.pformat(ret.get('return')))
elif ret and isinstance(ret, list):
message = 'id: {0}\r\n'.format(__opts__['id'])
for r in ret:
message += pprint.pformat(r.get('return'))
message += '\r\n'
else:
log.error('Data sent to slack_pulsar formatted incorrectly')
return
slack = _post_message(channel,
message,
username,
as_user,
api_key)
return slack
from datetime import datetime
from dateutil.parser import parse
from flask_login import login_required
from flask_restful import Resource
from flask_restful import marshal_with
from flask_restful import reqparse
from flask_restful_swagger import swagger
from sqlalchemy import func
from app import db
from app.decorators import manager_required
from app.main import api
from app.main.meta import CoffeeModel
from app.main.meta import filter_params
from app.main.serializer import CoffeeResource
from app.models import Coffee
# from app.models import CoffeeVendor
from app.models import Feedback
from flask_kits1.restful import Paginate
from .resource import BusinessResource
def custom_datetime(value, name):
return parse(value)
def set_feedback_count(record):
if not isinstance(record, tuple):
return record
item, count = record
item.feedback_count = count
return item
def compatible_bool(value):
# if isinstance(value, basestring):
# LITERAL = {'false': False, 'true': True}
# json_value = value.lower()
# if json_value in LITERAL:
# return LITERAL.get(json_value)
return bool(value)
class CoffeeApi(BusinessResource):
@swagger.operation(notes="Coffee list", parameters=filter_params())
@Paginate(CoffeeResource, item_builder=set_feedback_count)
def get(self):
# db.session.query(Coffee, func.count(Feedback.id)).outerjoin(Feedback, Coffee.id == Feedback.coffee_id).group_by(
# Coffee.id).all()
# print db.session.query(Coffee.name, Coffee.id).first()
# print db.session.query(Coffee).order_by(Coffee.name)[2:5]
# print db.session.query(Coffee).order_by(Coffee.name).limit(2).all()
# print 'count:', db.session.query(func.sum(Coffee.id)).scalar()
# print 'count & sum', db.session.query(func.count(Coffee.id), func.sum(Coffee.id)).first()
result = db.session.query(func.count(1).label('count_1'), func.sum(Coffee.id).label('sum_1')).first()
print('count & sum', result.keys(), result.count_1, result.sum_1)
print(Feedback.query.filter(Feedback.coffee_id == 1, Feedback.id > 1).order_by(
Feedback.create_time.desc()).limit(2).all())
start = datetime(2016, 11, 11, 0, 0, 0, 0)
end = datetime(2016, 11, 11, 23, 59, 59, 0)
query = Coffee.query.filter(Coffee.on_sale_date >= start, Coffee.on_sale_date <= end)
print(query.all())
# query = db.session.query(Coffee, func.count(1)).join((Vendor, CoffeeVendor)).order_by(
# Coffee.on_sale_date.desc())
# print query.all()
query = db.session.query(Coffee, func.count(Feedback.id))
query = query.outerjoin(Feedback, Coffee.id == Feedback.coffee_id)
query = query.filter(Coffee.id > 1)
query = query.group_by(Coffee.id).order_by(Coffee.on_sale_date.desc())
return query
# return Coffee.query.filter_by(vendor_id=1)
@swagger.operation(notes="New Coffee List",
parameters=[{
'name': 'coffee',
'description': 'new coffee information',
'dataType': CoffeeModel.__name__,
'paramType': 'body'
}])
@marshal_with(CoffeeResource)
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('name', type=str, help="coffee's name")
parser.add_argument('on_sale_date', type=custom_datetime)
parser.add_argument('imported', type=compatible_bool)
parser.add_argument('imported2', type=bool)
args = parser.parse_args()
coffee = Coffee(name=args.name, on_sale_date=args.on_sale_date, vendor_id=1, imported=args.imported)
db.session.add(coffee)
db.session.commit()
return coffee
api.add_resource(CoffeeApi, '/coffees')
class Coffee2Api(Resource):
@login_required
@manager_required
@swagger.operation(notes="Coffee list", parameters=filter_params())
@marshal_with(CoffeeResource)
def get(self):
return Coffee.query.paginate().items
api.add_resource(Coffee2Api, '/coffees2')
"""This package contains the whole boilerplateapp with all of its models, views and other modules.
This particular file additionally contains the applications factory.
"""
from flask import Flask
def create_app(config_name):
"""Flask app factory function.
It takes a `config_name` of the specific configuration to use for this instantiation.
"""
app = Flask(__name__, static_folder=None)
from boilerplateapp.config import configs
app.config.from_object(configs[config_name])
# Initialize extensions
from boilerplateapp.extensions import db, passlib
db.init_app(app)
passlib.init_app(app)
# Initialize handlers
from boilerplateapp.handlers import register_handlers
register_handlers(app)
# Initialize blueprints
from boilerplateapp.api import api
app.register_blueprint(api)
# Initialize custom commands
from boilerplateapp.cli import register_cli
register_cli(app)
return app
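# Illustrative sketch (not part of the original source): how the factory might
# be used, assuming 'develop' is one of the keys defined in
# boilerplateapp.config.configs:
#
#   app = create_app('develop')
#   app.run()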
"""SCons.Tool.rpm
Tool-specific initialization for rpm.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
The rpm tool calls the rpmbuild command. The first and only argument should a
tar.gz consisting of the source file and a specfile.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/rpm.py 2014/08/24 12:12:31 garyo"
import os
import re
import shutil
import subprocess
import SCons.Builder
import SCons.Node.FS
import SCons.Util
import SCons.Action
import SCons.Defaults
def get_cmd(source, env):
tar_file_with_included_specfile = source
if SCons.Util.is_List(source):
tar_file_with_included_specfile = source[0]
return "%s %s %s"%(env['RPM'], env['RPMFLAGS'],
tar_file_with_included_specfile.abspath )
def build_rpm(target, source, env):
# create a temporary rpm build root.
tmpdir = os.path.join( os.path.dirname( target[0].abspath ), 'rpmtemp' )
if os.path.exists(tmpdir):
shutil.rmtree(tmpdir)
# now create the mandatory rpm directory structure.
for d in ['RPMS', 'SRPMS', 'SPECS', 'BUILD']:
os.makedirs( os.path.join( tmpdir, d ) )
# set the topdir as an rpmflag.
env.Prepend( RPMFLAGS = '--define \'_topdir %s\'' % tmpdir )
# now call rpmbuild to create the rpm package.
handle = subprocess.Popen(get_cmd(source, env),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True)
output = handle.stdout.read()
status = handle.wait()
if status:
raise SCons.Errors.BuildError( node=target[0],
errstr=output,
filename=str(target[0]) )
else:
# XXX: assume that LC_ALL=C is set while running rpmbuild
output_files = re.compile( 'Wrote: (.*)' ).findall( output )
for output, input in zip( output_files, target ):
rpm_output = os.path.basename(output)
expected = os.path.basename(input.get_path())
assert expected == rpm_output, "got %s but expected %s" % (rpm_output, expected)
shutil.copy( output, input.abspath )
# cleanup before leaving.
shutil.rmtree(tmpdir)
return status
def string_rpm(target, source, env):
try:
return env['RPMCOMSTR']
except KeyError:
return get_cmd(source, env)
rpmAction = SCons.Action.Action(build_rpm, string_rpm)
RpmBuilder = SCons.Builder.Builder(action = SCons.Action.Action('$RPMCOM', '$RPMCOMSTR'),
source_scanner = SCons.Defaults.DirScanner,
suffix = '$RPMSUFFIX')
def generate(env):
"""Add Builders and construction variables for rpm to an Environment."""
try:
bld = env['BUILDERS']['Rpm']
except KeyError:
bld = RpmBuilder
env['BUILDERS']['Rpm'] = bld
env.SetDefault(RPM = 'LC_ALL=C rpmbuild')
env.SetDefault(RPMFLAGS = SCons.Util.CLVar('-ta'))
env.SetDefault(RPMCOM = rpmAction)
env.SetDefault(RPMSUFFIX = '.rpm')
def exists(env):
return env.Detect('rpmbuild')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-allauth'
copyright = u'2016, Raymond Penners'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.25.2'
# The full version, including alpha/beta/rc tags.
release = '0.25.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-allauthdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-allauth.tex', u'django-allauth Documentation',
u'Raymond Penners', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-allauth', u'django-allauth Documentation',
[u'Raymond Penners'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-allauth', u'django-allauth Documentation',
u'Raymond Penners', 'django-allauth', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
import os
import sys
import operator
import gzip
import math
import numpy as np
from collections import defaultdict
def main(directory, source):
print ''
print '==========================='
print ' Memory Analysis '
print '==========================='
print 'Running mem_analysis.main()'
print 'Analyzing: ' + source
print ''
BMKROOT = directory
os.chdir(BMKROOT)
BINARY = source + '.llvm'
t_histogram = {}
s_histogram = {}
for i in range(21):
value = int(math.pow(2, i))
t_histogram[value] = 0
s_histogram[value] = 0
# Stride Analysis for Spatial Locality
spatial_locality_score = 0 # Set up output variable.
addr_id = 0
past_32 = []
stride_access = 0
# Reuse Distance for Temporal Locality
temporal_locality_score = 0 # Set up output variable.
last_access = {}
mem_accesses = 0
addr_id = 0
mem_trace = gzip.open(BINARY + '_memtrace.gz', 'r')
for line in mem_trace:
addr = int(line.rstrip().split(',')[1])
addr_id += 1
mem_accesses += 1
# Reuse Distance
if addr in last_access:
stride = addr_id - last_access[addr]
if stride > 1048576:
stride = 1048576
t_histogram[int(math.pow(2, int(math.ceil(math.log(stride, 2)))))] += 1
last_access[addr] = addr_id
# Histogram of stride access
if len(past_32) >= 32:
stride = 1048576
for item in past_32:
if math.fabs(item - addr) < stride:
stride = math.fabs(item - addr)
if stride != 0:
s_histogram[int(math.pow(2, int(math.ceil(math.log(stride, 2)))))] += 1
stride_access += 1
del past_32[0]
past_32.append(addr)
mem_trace.close()
# Distribution of Stride Accesses
s_distribution = []
for i in range(len(s_histogram)):
if stride_access == 0:
spatial_locality_score = 0
break
percent = s_histogram[int(math.pow(2, i))] * 1.0 / stride_access
s_distribution.append(percent)
spatial_locality_score += percent * 1.0 / int(math.pow(2, i))
print 'Spatial locality score :\t%0.4f' % (spatial_locality_score)
# PLOT begins
spalo = open(BINARY + '_spatial_locality', 'w')
spalo.write('%0.4f\n' % (spatial_locality_score))
spalo.close()
# PLOT ends
# Cumulative Distribution of Reuse Distance
t_distribution = []
for i in range(len(t_histogram)):
percent = t_histogram[int(math.pow(2, i))] * 1.0 / mem_accesses
t_distribution.append(percent)
temporal_locality_score += percent * 1.0 * \
(len(t_histogram) - i) / len(t_histogram)
print 'Temporal locality score:\t%0.4f' % (temporal_locality_score)
# PLOT begins
temlo = open(BINARY + '_temporal_locality', 'w')
temlo.write('%0.4f\n' % (temporal_locality_score))
temlo.close()
# PLOT ends
stride_output = open(BINARY + '_stride_profile', 'w')
for i, percent in zip(range(len(s_distribution)), s_distribution):
stride = int(math.pow(2, i))
stride_output.write("%d,%f\n" % (stride, percent))
stride_output.close()
reuse_output = open(BINARY + '_reuse_profile', 'w')
for i, percent in zip(range(len(t_distribution)), t_distribution):
reuse = int(math.pow(2, i))
reuse_output.write("%d,%f\n" % (reuse, percent))
reuse_output.close()
print '---------------------------'
results = {"spatial_locality_score": spatial_locality_score,
"temporal_locality_score": temporal_locality_score}
return results
if __name__ == '__main__':
directory = sys.argv[1]
source = sys.argv[2]
main(directory, source)
from __future__ import unicode_literals
from importlib import import_module
import os
import sys
from django.apps import apps
from django.db.migrations.recorder import MigrationRecorder
from django.db.migrations.graph import MigrationGraph, NodeNotFoundError
from django.utils import six
from django.conf import settings
MIGRATIONS_MODULE_NAME = 'migrations'
class MigrationLoader(object):
"""
Loads migration files from disk, and their status from the database.
Migration files are expected to live in the "migrations" directory of
an app. Their names are entirely unimportant from a code perspective,
but will probably follow the 1234_name.py convention.
On initialization, this class will scan those directories, and open and
read the python files, looking for a class called Migration, which should
inherit from django.db.migrations.Migration. See
django.db.migrations.migration for what that looks like.
Some migrations will be marked as "replacing" another set of migrations.
These are loaded into a separate set of migrations away from the main ones.
If all the migrations they replace are either unapplied or missing from
disk, then they are injected into the main set, replacing the named migrations.
Any dependency pointers to the replaced migrations are re-pointed to the
new migration.
This does mean that this class MUST also talk to the database as well as
to disk, but this is probably fine. We're already not just operating
in memory.
"""
def __init__(self, connection, load=True, ignore_no_migrations=False):
self.connection = connection
self.disk_migrations = None
self.applied_migrations = None
self.ignore_no_migrations = ignore_no_migrations
if load:
self.build_graph()
@classmethod
def migrations_module(cls, app_label):
if app_label in settings.MIGRATION_MODULES:
return settings.MIGRATION_MODULES[app_label]
else:
app_package_name = apps.get_app_config(app_label).name
return '%s.%s' % (app_package_name, MIGRATIONS_MODULE_NAME)
def load_disk(self):
"""
Loads the migrations from all INSTALLED_APPS from disk.
"""
self.disk_migrations = {}
self.unmigrated_apps = set()
self.migrated_apps = set()
for app_config in apps.get_app_configs():
# Get the migrations module directory
module_name = self.migrations_module(app_config.label)
was_loaded = module_name in sys.modules
try:
module = import_module(module_name)
except ImportError as e:
# I hate doing this, but I don't want to squash other import errors.
# Might be better to try a directory check directly.
if "No module named" in str(e) and MIGRATIONS_MODULE_NAME in str(e):
self.unmigrated_apps.add(app_config.label)
continue
raise
else:
# PY3 will happily import empty dirs as namespaces.
if not hasattr(module, '__file__'):
continue
# Module is not a package (e.g. migrations.py).
if not hasattr(module, '__path__'):
continue
# Force a reload if it's already loaded (tests need this)
if was_loaded:
six.moves.reload_module(module)
self.migrated_apps.add(app_config.label)
directory = os.path.dirname(module.__file__)
# Scan for .py files
migration_names = set()
for name in os.listdir(directory):
if name.endswith(".py"):
import_name = name.rsplit(".", 1)[0]
if import_name[0] not in "_.~":
migration_names.add(import_name)
# Load them
south_style_migrations = False
for migration_name in migration_names:
try:
migration_module = import_module("%s.%s" % (module_name, migration_name))
except ImportError as e:
# Ignore South import errors, as we're triggering them
if "south" in str(e).lower():
south_style_migrations = True
break
raise
if not hasattr(migration_module, "Migration"):
raise BadMigrationError(
"Migration %s in app %s has no Migration class" % (migration_name, app_config.label)
)
# Ignore South-style migrations
if hasattr(migration_module.Migration, "forwards"):
south_style_migrations = True
break
self.disk_migrations[app_config.label, migration_name] = migration_module.Migration(migration_name, app_config.label)
if south_style_migrations:
self.unmigrated_apps.add(app_config.label)
def get_migration(self, app_label, name_prefix):
"Gets the migration exactly named, or raises `graph.NodeNotFoundError`"
return self.graph.nodes[app_label, name_prefix]
def get_migration_by_prefix(self, app_label, name_prefix):
"Returns the migration(s) which match the given app label and name _prefix_"
# Do the search
results = []
for l, n in self.disk_migrations:
if l == app_label and n.startswith(name_prefix):
results.append((l, n))
if len(results) > 1:
raise AmbiguityError(
"There is more than one migration for '%s' with the prefix '%s'" % (app_label, name_prefix)
)
elif len(results) == 0:
raise KeyError("There no migrations for '%s' with the prefix '%s'" % (app_label, name_prefix))
else:
return self.disk_migrations[results[0]]
def check_key(self, key, current_app):
if (key[1] != "__first__" and key[1] != "__latest__") or key in self.graph:
return key
# Special-case __first__, which means "the first migration" for
# migrated apps, and is ignored for unmigrated apps. It allows
# makemigrations to declare dependencies on apps before they even have
# migrations.
if key[0] == current_app:
# Ignore __first__ references to the same app (#22325)
return
if key[0] in self.unmigrated_apps:
# This app isn't migrated, but something depends on it.
# The models will get auto-added into the state, though
# so we're fine.
return
if key[0] in self.migrated_apps:
try:
if key[1] == "__first__":
return list(self.graph.root_nodes(key[0]))[0]
else: # "__latest__"
return list(self.graph.leaf_nodes(key[0]))[0]
except IndexError:
if self.ignore_no_migrations:
return None
else:
raise ValueError("Dependency on app with no migrations: %s" % key[0])
raise ValueError("Dependency on unknown app: %s" % key[0])
def build_graph(self):
"""
Builds a migration dependency graph using both the disk and database.
You'll need to rebuild the graph if you apply migrations. This isn't
usually a problem as generally migration stuff runs in a one-shot process.
"""
# Load disk data
self.load_disk()
# Load database data
if self.connection is None:
self.applied_migrations = set()
else:
recorder = MigrationRecorder(self.connection)
self.applied_migrations = recorder.applied_migrations()
# Do a first pass to separate out replacing and non-replacing migrations
normal = {}
replacing = {}
for key, migration in self.disk_migrations.items():
if migration.replaces:
replacing[key] = migration
else:
normal[key] = migration
# Calculate reverse dependencies - i.e., for each migration, what depends on it?
# This is just for dependency re-pointing when applying replacements,
# so we ignore run_before here.
reverse_dependencies = {}
for key, migration in normal.items():
for parent in migration.dependencies:
reverse_dependencies.setdefault(parent, set()).add(key)
# Remember the possible replacements to generate more meaningful error
# messages
reverse_replacements = {}
for key, migration in replacing.items():
for replaced in migration.replaces:
reverse_replacements.setdefault(replaced, set()).add(key)
# Carry out replacements if we can - that is, if all replaced migrations
# are either unapplied or missing.
for key, migration in replacing.items():
# Ensure this replacement migration is not in applied_migrations
self.applied_migrations.discard(key)
# Do the check. We can replace if all our replace targets are
# applied, or if all of them are unapplied.
applied_statuses = [(target in self.applied_migrations) for target in migration.replaces]
can_replace = all(applied_statuses) or (not any(applied_statuses))
if not can_replace:
continue
# Alright, time to replace. Step through the replaced migrations
# and remove, repointing dependencies if needs be.
for replaced in migration.replaces:
if replaced in normal:
# We don't care if the replaced migration doesn't exist;
# the usage pattern here is to delete things after a while.
del normal[replaced]
for child_key in reverse_dependencies.get(replaced, set()):
if child_key in migration.replaces:
continue
# child_key may appear in a replacement
if child_key in reverse_replacements:
for replaced_child_key in reverse_replacements[child_key]:
if replaced in replacing[replaced_child_key].dependencies:
replacing[replaced_child_key].dependencies.remove(replaced)
replacing[replaced_child_key].dependencies.append(key)
else:
normal[child_key].dependencies.remove(replaced)
normal[child_key].dependencies.append(key)
normal[key] = migration
# Mark the replacement as applied if all its replaced ones are
if all(applied_statuses):
self.applied_migrations.add(key)
# Finally, make a graph and load everything into it
self.graph = MigrationGraph()
for key, migration in normal.items():
self.graph.add_node(key, migration)
def _reraise_missing_dependency(migration, missing, exc):
"""
Checks if ``missing`` could have been replaced by any squash
migration but wasn't because the squash migration was partially
applied before. In that case raise a more understandable exception.
#23556
"""
if missing in reverse_replacements:
candidates = reverse_replacements.get(missing, set())
is_replaced = any(candidate in self.graph.nodes for candidate in candidates)
if not is_replaced:
tries = ', '.join('%s.%s' % c for c in candidates)
exc_value = NodeNotFoundError(
"Migration {0} depends on nonexistent node ('{1}', '{2}'). "
"Django tried to replace migration {1}.{2} with any of [{3}] "
"but wasn't able to because some of the replaced migrations "
"are already applied.".format(
migration, missing[0], missing[1], tries
),
missing)
exc_value.__cause__ = exc
six.reraise(NodeNotFoundError, exc_value, sys.exc_info()[2])
raise exc
# Add all internal dependencies first to ensure __first__ dependencies
# find the correct root node.
for key, migration in normal.items():
for parent in migration.dependencies:
if parent[0] != key[0] or parent[1] == '__first__':
# Ignore __first__ references to the same app (#22325)
continue
try:
self.graph.add_dependency(migration, key, parent)
except NodeNotFoundError as e:
# Since we added "key" to the nodes before this implies
# "parent" is not in there. To make the raised exception
# more understandable we check if parent could have been
# replaced but hasn't (eg partially applied squashed
# migration)
_reraise_missing_dependency(migration, parent, e)
for key, migration in normal.items():
for parent in migration.dependencies:
if parent[0] == key[0]:
# Internal dependencies already added.
continue
parent = self.check_key(parent, key[0])
if parent is not None:
try:
self.graph.add_dependency(migration, key, parent)
except NodeNotFoundError as e:
# Since we added "key" to the nodes before this implies
# "parent" is not in there.
_reraise_missing_dependency(migration, parent, e)
for child in migration.run_before:
child = self.check_key(child, key[0])
if child is not None:
try:
self.graph.add_dependency(migration, child, key)
except NodeNotFoundError as e:
# Since we added "key" to the nodes before this implies
# "child" is not in there.
_reraise_missing_dependency(migration, child, e)
def detect_conflicts(self):
"""
Looks through the loaded graph and detects any conflicts - apps
with more than one leaf migration. Returns a dict of the app labels
that conflict with the migration names that conflict.
"""
seen_apps = {}
conflicting_apps = set()
for app_label, migration_name in self.graph.leaf_nodes():
if app_label in seen_apps:
conflicting_apps.add(app_label)
seen_apps.setdefault(app_label, set()).add(migration_name)
return {app_label: seen_apps[app_label] for app_label in conflicting_apps}
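# Hedged illustration (not part of the original source): with two leaf
# migrations in the same app, the return value looks like
#     {'myapp': {'0002_a', '0002_b'}}
# while apps with a single leaf never appear in the result.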
def project_state(self, nodes=None, at_end=True):
"""
Returns a ProjectState object representing the most recent state
that the migrations we loaded represent.
See graph.make_state for the meaning of "nodes" and "at_end"
"""
return self.graph.make_state(nodes=nodes, at_end=at_end, real_apps=list(self.unmigrated_apps))
class BadMigrationError(Exception):
"""
Raised when there's a bad migration (unreadable/bad format/etc.)
"""
pass
class AmbiguityError(Exception):
"""
Raised when more than one migration matches a name prefix
"""
pass
|
{
"content_hash": "995d537518b14cd5e1618834391e7e66",
"timestamp": "",
"source": "github",
"line_count": 341,
"max_line_length": 133,
"avg_line_length": 46.65982404692082,
"alnum_prop": 0.5772107347118346,
"repo_name": "edevil/django",
"id": "9cc428e1622245e6e705cc7521c37616e2c67224",
"size": "15911",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "django/db/migrations/loader.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53429"
},
{
"name": "JavaScript",
"bytes": "103687"
},
{
"name": "Makefile",
"bytes": "5765"
},
{
"name": "Python",
"bytes": "10540191"
},
{
"name": "Shell",
"bytes": "10452"
}
],
"symlink_target": ""
}
|
"""
Utility functions for Selenium unit tests.
"""
from metashare import settings
from metashare.settings import ROOT_PATH, TEST_MODE_NAME
import os
from metashare import test_utils
import time
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from django_selenium import settings as dj_settings
from django_selenium.testcases import MyDriver, SeleniumTestCase
def login_user(driver, user_name, user_passwd):
"""
logs in the given user;
assumes that the browser is at the top level META-SHARE page
"""
# TODO remove this workaround when Selenium starts working again as intended
driver.set_window_size(3250, 2600)
driver.find_element_by_xpath("//div[@id='inner']/div[2]/a[2]/div").click()
driver.find_element_by_id("id_username").clear()
driver.find_element_by_id("id_username").send_keys(user_name)
driver.find_element_by_id("id_password").clear()
driver.find_element_by_id("id_password").send_keys(user_passwd)
driver.find_element_by_css_selector("input.button.middle_button").click()
def mouse_over(driver, web_ele):
"""
simulates mouse over the given web element
"""
code = "var fireOnThis = arguments[0]; " + \
"var evObj = document.createEvent('MouseEvents'); " + \
"evObj.initEvent( 'mouseover', true, true ); " + \
"fireOnThis.dispatchEvent(evObj);"
driver.execute_script(code, web_ele)
def click_menu_item(driver, web_ele):
"""
Simulates a click on the given web element which must have an attribute
'href'. The click is simulated by just following the link.
"""
driver.get(web_ele.get_attribute("href"))
def save_and_close(driver, target_id):
"""
Clicks the save button in the current window, waits until the window is
closed and then switches to the window with the given target id.
"""
time.sleep(10)
current_id = driver.current_window_handle
driver.find_element_by_name("_save").click()
wait_till_closed_and_switch(driver, current_id, target_id)
def wait_till_closed_and_switch(driver, closing_id, target_id):
"""
Waits up to ~60 seconds until the window with the given `closing_id` is closed or
throws a `TimeoutException`. If closing was successful, the driver switches
to the window with the given `target_id`.
"""
max_wait = 60
while closing_id in driver.window_handles and max_wait:
time.sleep(1)
max_wait -= 1
if not max_wait:
raise TimeoutException('Window was not closed in time.')
driver.switch_to.window(target_id)
def cancel_and_close(driver, target_id):
"""
Clicks the cancel button in the current window, confirms the alert dialog,
waits until the window is closed and then switches to the window with the
given target id.
"""
time.sleep(10)
current_id = driver.current_window_handle
driver.find_element_by_name("_cancel").click()
alert = driver.switch_to_alert()
alert.accept()
# TODO remove this workaround when Selenium starts working again as intended
time.sleep(1)
wait_till_closed_and_switch(driver, current_id, target_id)
def cancel_and_continue(driver, target_id):
"""
Clicks the cancel button in the current window, confirms the alert dialog and continues.
"""
driver.find_element_by_name("_cancel").click()
alert = driver.switch_to_alert()
alert.accept()
# TODO remove this workaround when Selenium starts working again as intended
time.sleep(1)
def click_and_wait(web_ele):
"""
Clicks the given web element and waits 1 second.
"""
web_ele.click()
# TODO remove this workaround when Selenium starts working again as intended
time.sleep(1)
def setup_screenshots_folder(test_class, test_method):
"""
prepares a folder for screenshots for the given test
"""
ss_path = '{0}/reports/{1}/{2}'.format(ROOT_PATH, test_class, test_method)
if not os.path.exists(ss_path):
os.makedirs(ss_path)
for one_file in os.listdir(ss_path):
file_path = os.path.join(ss_path, one_file)
os.unlink(file_path)
return ss_path
def import_dir(path):
"""
imports all XML files in the given directory
"""
# to speed up the import, we disable indexing during the import and only
# rebuild the index afterwards
os.environ['DISABLE_INDEXING_DURING_IMPORT'] = 'True'
_files = os.listdir(path)
for _file in _files:
test_utils.import_xml_or_zip("%s%s" % (path, _file))
os.environ['DISABLE_INDEXING_DURING_IMPORT'] = 'False'
from django.core.management import call_command
call_command('rebuild_index', interactive=False, using=TEST_MODE_NAME)
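# Hedged usage sketch (path name assumed): because the import concatenates the
# directory and file name without os.path.join, the given path must end with a
# separator, e.g.:
#     import_dir('/tmp/metashare-fixtures/')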
class MetashareMyDriver(MyDriver):
def __init__(self):
driver = getattr(webdriver, dj_settings.SELENIUM_DRIVER, None)
assert driver, "dj_settings.SELENIUM_DRIVER contains non-existing driver"
driver_opts = getattr(settings, "SELENIUM_DRIVER_OPTS", dict())
if "firefox_profile" in driver_opts.keys():
driver_opts["firefox_profile"] = webdriver.FirefoxProfile(driver_opts["firefox_profile"])
if driver is webdriver.Remote:
if isinstance(dj_settings.SELENIUM_CAPABILITY, dict):
capability = dj_settings.SELENIUM_CAPABILITY
else:
capability = getattr(webdriver.DesiredCapabilities, dj_settings.SELENIUM_CAPABILITY, None)
assert capability, 'dj_settings.SELENIUM_CAPABILITY contains non-existing capability'
self.driver = driver('http://%s:%d/wd/hub' % (dj_settings.SELENIUM_HOST, dj_settings.SELENIUM_PORT), capability, **driver_opts)
else:
self.driver = driver(**driver_opts)
self.live_server_url = 'http://%s:%s' % (dj_settings.SELENIUM_TESTSERVER_HOST , str(dj_settings.SELENIUM_TESTSERVER_PORT))
self.text = ''
class MetashareSeleniumTestCase(SeleniumTestCase):
def setUp(self):
import socket
socket.setdefaulttimeout(dj_settings.SELENIUM_TIMEOUT)
self.driver = MetashareMyDriver()
def spin_assert(self, assertion):
for i in xrange(60):
try:
assertion()
return
except Exception, e:
if i == 59:
raise
time.sleep(1)
|
{
"content_hash": "76f44a0875f653caf0396bc96498f489",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 139,
"avg_line_length": 38.52662721893491,
"alnum_prop": 0.6525879281216403,
"repo_name": "zeehio/META-SHARE",
"id": "6494bfcba37eb3d09510daf4965ef8cc30a222fb",
"size": "6511",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "metashare/repository/seltests/test_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7362"
},
{
"name": "C",
"bytes": "321"
},
{
"name": "C++",
"bytes": "112277"
},
{
"name": "CSS",
"bytes": "125117"
},
{
"name": "HTML",
"bytes": "2956138"
},
{
"name": "Java",
"bytes": "12780"
},
{
"name": "JavaScript",
"bytes": "201032"
},
{
"name": "M4",
"bytes": "8416"
},
{
"name": "Makefile",
"bytes": "26172"
},
{
"name": "Python",
"bytes": "4084877"
},
{
"name": "Shell",
"bytes": "121386"
},
{
"name": "XSLT",
"bytes": "473763"
}
],
"symlink_target": ""
}
|
import os
import sys
sys.path.insert(0, os.path.dirname(__file__) + '/../lib')
|
{
"content_hash": "508442518e3e025942613ba7a534179e",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 57,
"avg_line_length": 20,
"alnum_prop": 0.6375,
"repo_name": "casmlab/quac",
"id": "6c2e2c6c86de7d24ace84ab3ed33721d76a3b835",
"size": "605",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bin/quacpath.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5004"
},
{
"name": "Gnuplot",
"bytes": "1396"
},
{
"name": "Makefile",
"bytes": "12373"
},
{
"name": "PLpgSQL",
"bytes": "2740"
},
{
"name": "Python",
"bytes": "570122"
},
{
"name": "Shell",
"bytes": "56557"
}
],
"symlink_target": ""
}
|
import ast
import sys
from collections import namedtuple
from functools import partial
from typing import Optional
import astroid
_ast_py3 = None
try:
import typed_ast.ast3 as _ast_py3
except ImportError:
pass
PY38 = sys.version_info[:2] >= (3, 8)
if PY38:
# On Python 3.8, typed_ast was merged back into `ast`
_ast_py3 = ast
FunctionType = namedtuple("FunctionType", ["argtypes", "returns"])
class ParserModule(
namedtuple(
"ParserModule",
[
"module",
"unary_op_classes",
"cmp_op_classes",
"bool_op_classes",
"bin_op_classes",
"context_classes",
],
)
):
def parse(self, string: str, type_comments=True):
if self.module is _ast_py3:
if PY38:
parse_func = partial(self.module.parse, type_comments=type_comments)
else:
parse_func = partial(
self.module.parse, feature_version=sys.version_info.minor
)
else:
parse_func = self.module.parse
return parse_func(string)
def parse_function_type_comment(type_comment: str) -> Optional[FunctionType]:
"""Given a correct type comment, obtain a FunctionType object"""
if _ast_py3 is None:
return None
func_type = _ast_py3.parse(type_comment, "<type_comment>", "func_type")
return FunctionType(argtypes=func_type.argtypes, returns=func_type.returns)
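# Hedged usage sketch: on Python 3.8+ (where _ast_py3 is the stdlib `ast`
# module), a signature-style type comment such as "(int, str) -> bool" yields
# a FunctionType whose `argtypes` are the two argument annotation nodes and
# whose `returns` is the annotation node for `bool`; on older interpreters
# without typed_ast installed, the function simply returns None.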
def get_parser_module(type_comments=True) -> ParserModule:
if not type_comments:
parser_module = ast
else:
parser_module = _ast_py3
parser_module = parser_module or ast
unary_op_classes = _unary_operators_from_module(parser_module)
cmp_op_classes = _compare_operators_from_module(parser_module)
bool_op_classes = _bool_operators_from_module(parser_module)
bin_op_classes = _binary_operators_from_module(parser_module)
context_classes = _contexts_from_module(parser_module)
return ParserModule(
parser_module,
unary_op_classes,
cmp_op_classes,
bool_op_classes,
bin_op_classes,
context_classes,
)
def _unary_operators_from_module(module):
return {module.UAdd: "+", module.USub: "-", module.Not: "not", module.Invert: "~"}
def _binary_operators_from_module(module):
binary_operators = {
module.Add: "+",
module.BitAnd: "&",
module.BitOr: "|",
module.BitXor: "^",
module.Div: "/",
module.FloorDiv: "//",
module.MatMult: "@",
module.Mod: "%",
module.Mult: "*",
module.Pow: "**",
module.Sub: "-",
module.LShift: "<<",
module.RShift: ">>",
}
return binary_operators
def _bool_operators_from_module(module):
return {module.And: "and", module.Or: "or"}
def _compare_operators_from_module(module):
return {
module.Eq: "==",
module.Gt: ">",
module.GtE: ">=",
module.In: "in",
module.Is: "is",
module.IsNot: "is not",
module.Lt: "<",
module.LtE: "<=",
module.NotEq: "!=",
module.NotIn: "not in",
}
def _contexts_from_module(module):
return {
module.Load: astroid.Load,
module.Store: astroid.Store,
module.Del: astroid.Del,
module.Param: astroid.Store,
}
|
{
"content_hash": "208c093968cd9a890ab36b7db171dc8f",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 86,
"avg_line_length": 25.83969465648855,
"alnum_prop": 0.5822747415066469,
"repo_name": "ruchee/vimrc",
"id": "55e81c687581e0341eed2dc41add40cfd9d0a870",
"size": "3385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vimfiles/bundle/vim-python/submodules/astroid/astroid/_ast.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "22028"
},
{
"name": "Blade",
"bytes": "3314"
},
{
"name": "C#",
"bytes": "1734"
},
{
"name": "CSS",
"bytes": "31547"
},
{
"name": "Clojure",
"bytes": "47036"
},
{
"name": "CoffeeScript",
"bytes": "9274"
},
{
"name": "Common Lisp",
"bytes": "54314"
},
{
"name": "D",
"bytes": "11562"
},
{
"name": "Dockerfile",
"bytes": "7620"
},
{
"name": "Elixir",
"bytes": "41696"
},
{
"name": "Emacs Lisp",
"bytes": "10489"
},
{
"name": "Erlang",
"bytes": "137788"
},
{
"name": "F#",
"bytes": "2230"
},
{
"name": "Go",
"bytes": "54655"
},
{
"name": "HTML",
"bytes": "178954"
},
{
"name": "Haml",
"bytes": "39"
},
{
"name": "Haskell",
"bytes": "2031"
},
{
"name": "JavaScript",
"bytes": "9086"
},
{
"name": "Julia",
"bytes": "9540"
},
{
"name": "Kotlin",
"bytes": "8669"
},
{
"name": "Less",
"bytes": "327"
},
{
"name": "Makefile",
"bytes": "87500"
},
{
"name": "Mustache",
"bytes": "3375"
},
{
"name": "Nix",
"bytes": "1860"
},
{
"name": "PHP",
"bytes": "9238"
},
{
"name": "PLpgSQL",
"bytes": "33747"
},
{
"name": "Perl",
"bytes": "84200"
},
{
"name": "PostScript",
"bytes": "3891"
},
{
"name": "Python",
"bytes": "7366233"
},
{
"name": "Racket",
"bytes": "1150"
},
{
"name": "Raku",
"bytes": "21146"
},
{
"name": "Ruby",
"bytes": "133344"
},
{
"name": "SCSS",
"bytes": "327"
},
{
"name": "Sass",
"bytes": "308"
},
{
"name": "Scala",
"bytes": "13125"
},
{
"name": "Shell",
"bytes": "52916"
},
{
"name": "Smarty",
"bytes": "300"
},
{
"name": "Swift",
"bytes": "11436"
},
{
"name": "TypeScript",
"bytes": "4663"
},
{
"name": "Vim Script",
"bytes": "10545492"
},
{
"name": "Vim Snippet",
"bytes": "559139"
}
],
"symlink_target": ""
}
|
"""
Provide the class Message and its subclasses.
"""
class Message(object):
message = ''
message_args = ()
def __init__(self, filename, loc):
self.filename = filename
self.lineno = loc.lineno
self.col = getattr(loc, 'col_offset', 0)
def __str__(self):
return '%s:%s: %s' % (self.filename, self.lineno,
self.message % self.message_args)
class UnusedImport(Message):
message = '%r imported but unused'
def __init__(self, filename, loc, name):
Message.__init__(self, filename, loc)
self.message_args = (name,)
class RedefinedWhileUnused(Message):
message = 'redefinition of unused %r from line %r'
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class RedefinedInListComp(Message):
message = 'list comprehension redefines %r from line %r'
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class ImportShadowedByLoopVar(Message):
message = 'import %r from line %r shadowed by loop variable'
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class ImportStarNotPermitted(Message):
message = "'from %s import *' only allowed at module level"
def __init__(self, filename, loc, modname):
Message.__init__(self, filename, loc)
self.message_args = (modname,)
class ImportStarUsed(Message):
message = "'from %s import *' used; unable to detect undefined names"
def __init__(self, filename, loc, modname):
Message.__init__(self, filename, loc)
self.message_args = (modname,)
class ImportStarUsage(Message):
message = "%s may be undefined, or defined from star imports: %s"
def __init__(self, filename, loc, name, from_list):
Message.__init__(self, filename, loc)
self.message_args = (name, from_list)
class UndefinedName(Message):
message = 'undefined name %r'
def __init__(self, filename, loc, name):
Message.__init__(self, filename, loc)
self.message_args = (name,)
class DoctestSyntaxError(Message):
message = 'syntax error in doctest'
def __init__(self, filename, loc, position=None):
Message.__init__(self, filename, loc)
if position:
(self.lineno, self.col) = position
self.message_args = ()
class UndefinedExport(Message):
message = 'undefined name %r in __all__'
def __init__(self, filename, loc, name):
Message.__init__(self, filename, loc)
self.message_args = (name,)
class UndefinedLocal(Message):
message = ('local variable %r (defined in enclosing scope on line %r) '
'referenced before assignment')
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class DuplicateArgument(Message):
message = 'duplicate argument %r in function definition'
def __init__(self, filename, loc, name):
Message.__init__(self, filename, loc)
self.message_args = (name,)
class LateFutureImport(Message):
message = 'from __future__ imports must occur at the beginning of the file'
def __init__(self, filename, loc, names):
Message.__init__(self, filename, loc)
self.message_args = ()
class FutureFeatureNotDefined(Message):
"""An undefined __future__ feature name was imported."""
message = 'future feature %s is not defined'
def __init__(self, filename, loc, name):
Message.__init__(self, filename, loc)
self.message_args = (name,)
class UnusedVariable(Message):
"""
Indicates that a variable has been explicitly assigned to but not actually
used.
"""
message = 'local variable %r is assigned to but never used'
def __init__(self, filename, loc, names):
Message.__init__(self, filename, loc)
self.message_args = (names,)
class ReturnWithArgsInsideGenerator(Message):
"""
Indicates a return statement with arguments inside a generator.
"""
message = '\'return\' with argument inside generator'
class ReturnOutsideFunction(Message):
"""
Indicates a return statement outside of a function/method.
"""
message = '\'return\' outside function'
class YieldOutsideFunction(Message):
"""
Indicates a yield or yield from statement outside of a function/method.
"""
message = '\'yield\' outside function'
# For whatever reason, Python gives different error messages for these two. We
# match the Python error message exactly.
class ContinueOutsideLoop(Message):
"""
Indicates a continue statement outside of a while or for loop.
"""
message = '\'continue\' not properly in loop'
class BreakOutsideLoop(Message):
"""
Indicates a break statement outside of a while or for loop.
"""
message = '\'break\' outside loop'
class ContinueInFinally(Message):
"""
Indicates a continue statement in a finally block in a while or for loop.
"""
message = '\'continue\' not supported inside \'finally\' clause'
class DefaultExceptNotLast(Message):
"""
Indicates an except: block as not the last exception handler.
"""
message = 'default \'except:\' must be last'
class TwoStarredExpressions(Message):
"""
Two or more starred expressions in an assignment (a, *b, *c = d).
"""
message = 'two starred expressions in assignment'
class TooManyExpressionsInStarredAssignment(Message):
"""
Too many expressions in an assignment with star-unpacking
"""
message = 'too many expressions in star-unpacking assignment'
class AssertTuple(Message):
"""
Assertion test is a tuple, which is always True.
"""
message = 'assertion is always true, perhaps remove parentheses?'
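# Hedged usage sketch (not part of pyflakes itself): any object exposing
# `lineno` (and optionally `col_offset`) can serve as `loc`, and str() renders
# the standard "<file>:<line>: <text>" form, e.g.
#     from collections import namedtuple
#     _Loc = namedtuple('_Loc', 'lineno col_offset')
#     str(UnusedImport('example.py', _Loc(3, 0), 'os'))
#     # -> "example.py:3: 'os' imported but unused"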
|
{
"content_hash": "d9bbcd993fa1a3a2b40cee80253cb1ef",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 79,
"avg_line_length": 27.963133640552996,
"alnum_prop": 0.6423862887277522,
"repo_name": "Aorjoa/aiyara-ceph-dash",
"id": "05db5bfabbc0cac8008ad118e98acffcb3fbb583",
"size": "6068",
"binary": false,
"copies": "2",
"ref": "refs/heads/aiyara",
"path": ".tox/flake8/lib/python2.7/site-packages/pyflakes/messages.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "29951"
},
{
"name": "Groff",
"bytes": "17679"
},
{
"name": "HTML",
"bytes": "12219"
},
{
"name": "JavaScript",
"bytes": "218064"
},
{
"name": "Python",
"bytes": "6880049"
},
{
"name": "Shell",
"bytes": "6504"
}
],
"symlink_target": ""
}
|
"""Application controller for FastTree
designed for FastTree v1.1.0. Also functions with v2.0.1, v2.1.0, and v2.1.3,
though only with basic functionality."""
from cogent.app.parameters import ValuedParameter, FlagParameter, \
MixedParameter
from cogent.app.util import CommandLineApplication, FilePath, system, \
CommandLineAppResult, ResultPath, remove, ApplicationError
from cogent.core.tree import PhyloNode
from cogent.parse.tree import DndParser
from cogent.core.moltype import DNA, RNA, PROTEIN
from cogent.core.alignment import SequenceCollection
__author__ = "Daniel McDonald"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Daniel McDonald", "Justin Kuczynski"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Daniel McDonald"
__email__ = "mcdonadt@colorado.edu"
__status__ = "Development"
class FastTree(CommandLineApplication):
"""FastTree application Controller"""
_command = 'FastTree'
_input_handler = '_input_as_multiline_string'
_parameters = {
'-quiet':FlagParameter('-',Name='quiet'),
'-boot':ValuedParameter('-',Delimiter=' ',Name='boot'),
'-seed':ValuedParameter('-',Delimiter=' ',Name='seed'),
'-nni':ValuedParameter('-',Delimiter=' ',Name='nni'),
'-slow':FlagParameter('-',Name='slow'),
'-fastest':FlagParameter('-',Name='fastest'),
'-top':FlagParameter('-',Name='top'),
'-notop':FlagParameter('-',Name='notop'),
'-topm':ValuedParameter('-',Delimiter=' ',Name='topm'),
'-close':ValuedParameter('-',Delimiter=' ',Name='close'),
'-refresh':ValuedParameter('-',Delimiter=' ',Name='refresh'),
'-matrix':ValuedParameter('-',Delimiter=' ',Name='matrix'),
'-nomatrix':FlagParameter('-',Name='nomatrix'),
'-nj':FlagParameter('-',Name='nj'),
'-bionj':FlagParameter('-',Name='bionj'),
'-nt':FlagParameter('-',Name='nt'),
'-n':ValuedParameter('-',Delimiter=' ',Name='n'),
'-pseudo':MixedParameter('-',Delimiter=' ', Name='pseudo'),
'-intree':ValuedParameter('-',Delimiter=' ',Name='intree'),
'-spr':ValuedParameter('-',Delimiter=' ',Name='spr'),
'-constraints':ValuedParameter('-',Delimiter=' ',\
Name='constraints'),
'-constraintWeight':ValuedParameter('-',Delimiter=' ',\
Name='constraintWeight'),\
'-makematrix':ValuedParameter('-',Delimiter=' ',Name='makematrix')}
def __call__(self,data=None, remove_tmp=True):
"""Run the application with the specified kwargs on data
data: anything that can be cast into a string or written out to
a file. Usually either a list of things or a single string or
number. input_handler will be called on this data before it
is passed as part of the command-line argument, so by creating
your own input handlers you can customize what kind of data
you want your application to accept
remove_tmp: if True, removes tmp files
NOTE: Override of the base class to handle redirected output
"""
input_handler = self.InputHandler
suppress_stderr = self.SuppressStderr
outfile = self.getTmpFilename(self.TmpDir)
self._outfile = outfile
if suppress_stderr:
errfile = FilePath('/dev/null')
else:
errfile = FilePath(self.getTmpFilename(self.TmpDir))
if data is None:
input_arg = ''
else:
input_arg = getattr(self,input_handler)(data)
# Build up the command, consisting of a BaseCommand followed by
# input and output (file) specifications
command = self._command_delimiter.join(filter(None,\
[self.BaseCommand,str(input_arg),'>',str(outfile),'2>',\
str(errfile)]))
if self.HaltExec:
raise AssertionError, "Halted exec with command:\n" + command
# The return value of system is a 16-bit number containing the signal
# number that killed the process, and then the exit status.
# We only want to keep the exit status so do a right bitwise shift to
# get rid of the signal number byte
exit_status = system(command) >> 8
# Determine if error should be raised due to exit status of
# application
if not self._accept_exit_status(exit_status):
raise ApplicationError, \
'Unacceptable application exit status: %s, command: %s'\
% (str(exit_status),command)
out = open(outfile,"r")
err = None
if not suppress_stderr:
err = open(errfile,"r")
result = CommandLineAppResult(out,err,exit_status,\
result_paths=self._get_result_paths(data))
# Clean up the input file if one was created
if remove_tmp:
if self._input_filename:
remove(self._input_filename)
self._input_filename = None
return result
def _get_result_paths(self, data):
result = {}
result['Tree'] = ResultPath(Path=self._outfile)
return result
def build_tree_from_alignment(aln, moltype, best_tree=False, params=None):
"""Returns a tree from alignment
Will check MolType of aln object
"""
if params is None:
params = {}
if moltype == DNA or moltype == RNA:
params['-nt'] = True
elif moltype == PROTEIN:
params['-nt'] = False
else:
raise ValueError, \
"FastTree does not support moltype: %s" % moltype.label
if best_tree:
params['-slow'] = True
#Create mapping between abbreviated IDs and full IDs
int_map, int_keys = aln.getIntMap()
#Create SequenceCollection from int_map.
int_map = SequenceCollection(int_map,MolType=moltype)
app = FastTree(params=params)
result = app(int_map.toFasta())
tree = DndParser(result['Tree'].read(), constructor=PhyloNode)
#remap tip names
for tip in tree.tips():
tip.Name = int_keys[tip.Name]
return tree
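# Hedged usage sketch (assumes `aln` is a PyCogent alignment or
# SequenceCollection of DNA sequences built elsewhere and that the FastTree
# binary is on the PATH):
#     tree = build_tree_from_alignment(aln, moltype=DNA)
#     print tree.getNewick(with_distances=True)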
|
{
"content_hash": "a67e37302482713db6d51c74d68958a9",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 79,
"avg_line_length": 39.5,
"alnum_prop": 0.5969936708860759,
"repo_name": "sauloal/cnidaria",
"id": "3f0c2ff2169fc45c6d2b908ac68e52dd69b0dd38",
"size": "6342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/venv/lib/python2.7/site-packages/cogent/app/fasttree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1696790"
},
{
"name": "C++",
"bytes": "3035466"
},
{
"name": "CSS",
"bytes": "20306"
},
{
"name": "FORTRAN",
"bytes": "3707"
},
{
"name": "Groff",
"bytes": "32478"
},
{
"name": "HTML",
"bytes": "19658"
},
{
"name": "JavaScript",
"bytes": "250616"
},
{
"name": "Jupyter Notebook",
"bytes": "8401292"
},
{
"name": "M4",
"bytes": "3905"
},
{
"name": "Makefile",
"bytes": "177650"
},
{
"name": "Objective-C",
"bytes": "1701"
},
{
"name": "Python",
"bytes": "28122291"
},
{
"name": "R",
"bytes": "86108"
},
{
"name": "Shell",
"bytes": "676123"
}
],
"symlink_target": ""
}
|
"""Unit tests for update_webgl_conformance_tests."""
import unittest
from webkitpy.webgl import update_webgl_conformance_tests as webgl
def construct_script(name):
return "<script src=\"" + name + "\"></script>\n"
def construct_style(name):
return "<link rel=\"stylesheet\" href=\"" + name + "\">"
class TestTranslation(unittest.TestCase):
def assert_unchanged(self, text):
self.assertEqual(text, webgl.translate_khronos_test(text))
def assert_translate(self, input, output):
self.assertEqual(output, webgl.translate_khronos_test(input))
def test_simple_unchanged(self):
self.assert_unchanged("")
self.assert_unchanged("<html></html>")
def test_header_strip(self):
single_line_header = "<!-- single line header. -->"
multi_line_header = """<!-- this is a multi-line
header. it should all be removed too.
-->"""
text = "<html></html>"
self.assert_translate(single_line_header, "")
self.assert_translate(single_line_header + text, text)
self.assert_translate(multi_line_header + text, text)
def dont_strip_other_headers(self):
self.assert_unchanged("<html>\n<!-- don't remove comments on other lines. -->\n</html>")
def test_include_rewriting(self):
# Mappings to None are unchanged
styles = {
"../resources/js-test-style.css": "../../js/resources/js-test-style.css",
"fail.css": None,
"resources/stylesheet.css": None,
"../resources/style.css": None,
}
scripts = {
"../resources/js-test-pre.js": "../../js/resources/js-test-pre.js",
"../resources/js-test-post.js": "../../js/resources/js-test-post.js",
"../resources/desktop-gl-constants.js": "resources/desktop-gl-constants.js",
"resources/shadow-offset.js": None,
"../resources/js-test-post-async.js": None,
}
input_text = ""
output_text = ""
for input, output in styles.items():
input_text += construct_style(input)
output_text += construct_style(output if output else input)
for input, output in scripts.items():
input_text += construct_script(input)
output_text += construct_script(output if output else input)
head = '<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">\n<html>\n<head>\n'
foot = '</head>\n<body>\n</body>\n</html>'
input_text = head + input_text + foot
output_text = head + output_text + foot
self.assert_translate(input_text, output_text)
|
{
"content_hash": "6c6b5c874948fb6875c544343d362d7c",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 96,
"avg_line_length": 37.25352112676056,
"alnum_prop": 0.5928166351606805,
"repo_name": "wuhengzhi/chromium-crosswalk",
"id": "e3e3a52b572a3a4ede86fb7df5747f6d848f1d6d",
"size": "4175",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "third_party/WebKit/Tools/Scripts/webkitpy/webgl/update_webgl_conformance_tests_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import sys
import matplotlib
import numpy as np
from mpl_toolkits.basemap import Basemap
from array import array
import matplotlib.pyplot as plt
import urllib
import datetime
import dateutil.parser
import EQMethods
##############################################################################
def main(argv=None):
NELat = float(sys.argv[1])
NELng = float(sys.argv[2])
SWLat = float(sys.argv[3])
SWLng = float(sys.argv[4])
MagLo = float(sys.argv[5])
Location = str(sys.argv[6])
# EQMethods.get_catalog(NELat, NELng, SWLat, SWLng, MagLo)
EQMethods.histogram_eps_region_circle(NELat, NELng, SWLat, SWLng, MagLo, Location)
#
if __name__ == "__main__":
sys.exit(main())
|
{
"content_hash": "b33aa2d85ba788e51c90e1db3d48d053",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 86,
"avg_line_length": 21.285714285714285,
"alnum_prop": 0.5879194630872483,
"repo_name": "jbrundle/earthquake-forecasts",
"id": "ea8da73eb753d0205f7c58f2e7e6fe9faed8e8ed",
"size": "1374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plot_EQ_EPS_Region_Circle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "IDL",
"bytes": "1736"
},
{
"name": "Prolog",
"bytes": "6345"
},
{
"name": "Python",
"bytes": "424406"
},
{
"name": "Shell",
"bytes": "56"
}
],
"symlink_target": ""
}
|
"""Generate template values for an interface.
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
from collections import Counter, defaultdict
import itertools
from operator import itemgetter
import idl_definitions
from idl_definitions import IdlOperation, IdlArgument
import idl_types
from idl_types import IdlType, inherits_interface
import v8_attributes
from v8_globals import includes
import v8_methods
import v8_types
from v8_types import cpp_ptr_type, cpp_template_type
import v8_utilities
from v8_utilities import (cpp_name_or_partial, capitalize, conditional_string, cpp_name, gc_type,
has_extended_attribute_value, runtime_enabled_function_name,
extended_attribute_value_as_list, is_legacy_interface_type_checking)
INTERFACE_H_INCLUDES = frozenset([
'bindings/core/v8/ScriptWrappable.h',
'bindings/core/v8/ToV8.h',
'bindings/core/v8/V8Binding.h',
'bindings/core/v8/V8DOMWrapper.h',
'bindings/core/v8/WrapperTypeInfo.h',
'platform/heap/Handle.h',
])
INTERFACE_CPP_INCLUDES = frozenset([
'bindings/core/v8/ExceptionState.h',
'bindings/core/v8/V8DOMConfiguration.h',
'bindings/core/v8/V8HiddenValue.h',
'bindings/core/v8/V8ObjectConstructor.h',
'core/dom/ContextFeatures.h',
'core/dom/Document.h',
'platform/RuntimeEnabledFeatures.h',
'platform/TraceEvent.h',
'wtf/GetPtr.h',
'wtf/RefPtr.h',
])
def interface_context(interface):
includes.clear()
includes.update(INTERFACE_CPP_INCLUDES)
header_includes = set(INTERFACE_H_INCLUDES)
if interface.is_partial:
# A partial interface definition cannot specify that the interface
# inherits from another interface. Inheritance must be specified on
# the original interface definition.
parent_interface = None
is_event_target = False
# partial interface needs the definition of its original interface.
includes.add('bindings/core/v8/V8%s.h' % interface.name)
else:
parent_interface = interface.parent
if parent_interface:
header_includes.update(v8_types.includes_for_interface(parent_interface))
is_event_target = inherits_interface(interface.name, 'EventTarget')
extended_attributes = interface.extended_attributes
is_array_buffer_or_view = interface.idl_type.is_array_buffer_or_view
is_typed_array_type = interface.idl_type.is_typed_array
if is_array_buffer_or_view:
includes.add('bindings/core/v8/V8ArrayBuffer.h')
if interface.name == 'ArrayBuffer':
includes.add('core/dom/DOMArrayBufferDeallocationObserver.h')
if interface.name == 'ArrayBufferView':
includes.update((
'bindings/core/v8/V8Int8Array.h',
'bindings/core/v8/V8Int16Array.h',
'bindings/core/v8/V8Int32Array.h',
'bindings/core/v8/V8Uint8Array.h',
'bindings/core/v8/V8Uint8ClampedArray.h',
'bindings/core/v8/V8Uint16Array.h',
'bindings/core/v8/V8Uint32Array.h',
'bindings/core/v8/V8Float32Array.h',
'bindings/core/v8/V8Float64Array.h',
'bindings/core/v8/V8DataView.h'))
# [ActiveDOMObject]
is_active_dom_object = 'ActiveDOMObject' in extended_attributes
# [CheckSecurity]
is_check_security = 'CheckSecurity' in extended_attributes
if is_check_security:
includes.add('bindings/core/v8/BindingSecurity.h')
# [DependentLifetime]
is_dependent_lifetime = 'DependentLifetime' in extended_attributes
# [MeasureAs]
is_measure_as = 'MeasureAs' in extended_attributes
if is_measure_as:
includes.add('core/frame/UseCounter.h')
# [SetWrapperReferenceFrom]
reachable_node_function = extended_attributes.get('SetWrapperReferenceFrom')
if reachable_node_function:
includes.update(['bindings/core/v8/V8GCController.h',
'core/dom/Element.h'])
# [SetWrapperReferenceTo]
set_wrapper_reference_to_list = [{
'name': argument.name,
# FIXME: properly should be:
# 'cpp_type': argument.idl_type.cpp_type_args(raw_type=True),
# (if type is non-wrapper type like NodeFilter, normally RefPtr)
# Raw pointers faster though, and NodeFilter hacky anyway.
'cpp_type': argument.idl_type.implemented_as + '*',
'idl_type': argument.idl_type,
'v8_type': v8_types.v8_type(argument.idl_type.name),
} for argument in extended_attributes.get('SetWrapperReferenceTo', [])]
for set_wrapper_reference_to in set_wrapper_reference_to_list:
set_wrapper_reference_to['idl_type'].add_includes_for_type()
# [SetWrapperReferenceFrom]
has_visit_dom_wrapper = (
has_extended_attribute_value(interface, 'Custom', 'VisitDOMWrapper') or
reachable_node_function or
set_wrapper_reference_to_list)
this_gc_type = gc_type(interface)
wrapper_class_id = ('NodeClassId' if inherits_interface(interface.name, 'Node') else 'ObjectClassId')
v8_class_name = v8_utilities.v8_class_name(interface)
cpp_class_name = cpp_name(interface)
cpp_class_name_or_partial = cpp_name_or_partial(interface)
v8_class_name_or_partial = v8_utilities.v8_class_name_or_partial(interface)
context = {
'conditional_string': conditional_string(interface), # [Conditional]
'cpp_class': cpp_class_name,
'cpp_class_or_partial': cpp_class_name_or_partial,
'event_target_inheritance': 'InheritFromEventTarget' if is_event_target else 'NotInheritFromEventTarget',
'gc_type': this_gc_type,
# FIXME: Remove 'EventTarget' special handling, http://crbug.com/383699
'has_access_check_callbacks': (is_check_security and
interface.name != 'Window' and
interface.name != 'EventTarget'),
'has_custom_legacy_call_as_function': has_extended_attribute_value(interface, 'Custom', 'LegacyCallAsFunction'), # [Custom=LegacyCallAsFunction]
'has_custom_to_v8': has_extended_attribute_value(interface, 'Custom', 'ToV8'), # [Custom=ToV8]
'has_partial_interface': len(interface.partial_interfaces) > 0,
'has_visit_dom_wrapper': has_visit_dom_wrapper,
'header_includes': header_includes,
'interface_name': interface.name,
'is_active_dom_object': is_active_dom_object,
'is_array_buffer_or_view': is_array_buffer_or_view,
'is_check_security': is_check_security,
'is_event_target': is_event_target,
'is_exception': interface.is_exception,
'is_node': inherits_interface(interface.name, 'Node'),
'is_partial': interface.is_partial,
'is_typed_array_type': is_typed_array_type,
'lifetime': 'Dependent'
if (has_visit_dom_wrapper or
is_active_dom_object or
is_dependent_lifetime)
else 'Independent',
'measure_as': v8_utilities.measure_as(interface, None), # [MeasureAs]
'parent_interface': parent_interface,
'pass_cpp_type': cpp_template_type(
cpp_ptr_type('PassRefPtr', 'RawPtr', this_gc_type),
cpp_name(interface)),
'reachable_node_function': reachable_node_function,
'runtime_enabled_function': runtime_enabled_function_name(interface), # [RuntimeEnabled]
'set_wrapper_reference_to_list': set_wrapper_reference_to_list,
'v8_class': v8_class_name,
'v8_class_or_partial': v8_class_name_or_partial,
'wrapper_class_id': wrapper_class_id,
}
# Constructors
constructors = [constructor_context(interface, constructor)
for constructor in interface.constructors
# FIXME: shouldn't put named constructors with constructors
# (currently needed for Perl compatibility)
# Handle named constructors separately
if constructor.name == 'Constructor']
if len(constructors) > 1:
context['constructor_overloads'] = overloads_context(interface, constructors)
# [CustomConstructor]
custom_constructors = [{ # Only needed for computing interface length
'number_of_required_arguments':
number_of_required_arguments(constructor),
} for constructor in interface.custom_constructors]
# [EventConstructor]
has_event_constructor = 'EventConstructor' in extended_attributes
any_type_attributes = [attribute for attribute in interface.attributes
if attribute.idl_type.name == 'Any']
if has_event_constructor:
includes.add('bindings/core/v8/Dictionary.h')
if any_type_attributes:
includes.add('bindings/core/v8/SerializedScriptValue.h')
includes.add('bindings/core/v8/SerializedScriptValueFactory.h')
# [NamedConstructor]
named_constructor = named_constructor_context(interface)
if constructors or custom_constructors or has_event_constructor or named_constructor:
if interface.is_partial:
raise Exception('[Constructor] and [NamedConstructor] MUST NOT be'
' specified on partial interface definitions:'
'%s' % interface.name)
includes.add('bindings/core/v8/V8ObjectConstructor.h')
includes.add('core/frame/LocalDOMWindow.h')
context.update({
'any_type_attributes': any_type_attributes,
'constructors': constructors,
'has_custom_constructor': bool(custom_constructors),
'has_event_constructor': has_event_constructor,
'interface_length':
interface_length(interface, constructors + custom_constructors),
'is_constructor_raises_exception': extended_attributes.get('RaisesException') == 'Constructor', # [RaisesException=Constructor]
'named_constructor': named_constructor,
})
constants = [constant_context(constant, interface) for constant in interface.constants]
special_getter_constants = []
runtime_enabled_constants = []
constant_configuration_constants = []
for constant in constants:
if constant['measure_as'] or constant['deprecate_as']:
special_getter_constants.append(constant)
continue
if constant['runtime_enabled_function']:
runtime_enabled_constants.append(constant)
continue
constant_configuration_constants.append(constant)
# Constants
context.update({
'constant_configuration_constants': constant_configuration_constants,
'constants': constants,
'do_not_check_constants': 'DoNotCheckConstants' in extended_attributes,
'has_constant_configuration': any(
not constant['runtime_enabled_function']
for constant in constants),
'runtime_enabled_constants': runtime_enabled_constants,
'special_getter_constants': special_getter_constants,
})
# Attributes
attributes = [v8_attributes.attribute_context(interface, attribute)
for attribute in interface.attributes]
has_conditional_attributes = any(attribute['per_context_enabled_function'] or attribute['exposed_test'] for attribute in attributes)
if has_conditional_attributes and interface.is_partial:
raise Exception('Conditional attributes between partial interfaces in modules and the original interfaces(%s) in core are not allowed.' % interface.name)
context.update({
'attributes': attributes,
'has_accessor_configuration': any(
attribute['is_expose_js_accessors'] and
not (attribute['is_static'] or
attribute['runtime_enabled_function'] or
attribute['per_context_enabled_function']) and
attribute['should_be_exposed_to_script']
for attribute in attributes),
'has_attribute_configuration': any(
not (attribute['is_expose_js_accessors'] or
attribute['is_static'] or
attribute['runtime_enabled_function'] or
attribute['per_context_enabled_function'])
and attribute['should_be_exposed_to_script']
for attribute in attributes),
'has_conditional_attributes': has_conditional_attributes,
'has_constructor_attributes': any(attribute['constructor_type'] for attribute in attributes),
'has_replaceable_attributes': any(attribute['is_replaceable'] for attribute in attributes),
})
# Methods
methods = []
if interface.original_interface:
methods.extend([v8_methods.method_context(interface, operation, is_visible=False)
for operation in interface.original_interface.operations
if operation.name])
methods.extend([v8_methods.method_context(interface, method)
for method in interface.operations
if method.name]) # Skip anonymous special operations (methods)
if interface.partial_interfaces:
assert len(interface.partial_interfaces) == len(set(interface.partial_interfaces))
for partial_interface in interface.partial_interfaces:
methods.extend([v8_methods.method_context(interface, operation, is_visible=False)
for operation in partial_interface.operations
if operation.name])
compute_method_overloads_context(interface, methods)
def generated_method(return_type, name, arguments=None, extended_attributes=None, implemented_as=None):
operation = IdlOperation(interface.idl_name)
operation.idl_type = return_type
operation.name = name
if arguments:
operation.arguments = arguments
if extended_attributes:
operation.extended_attributes.update(extended_attributes)
if implemented_as is None:
implemented_as = name + 'ForBinding'
operation.extended_attributes['ImplementedAs'] = implemented_as
return v8_methods.method_context(interface, operation)
def generated_argument(idl_type, name, is_optional=False, extended_attributes=None):
argument = IdlArgument(interface.idl_name)
argument.idl_type = idl_type
argument.name = name
argument.is_optional = is_optional
if extended_attributes:
argument.extended_attributes.update(extended_attributes)
return argument
# [Iterable], iterable<>, maplike<> and setlike<>
iterator_method = None
# FIXME: support Iterable in partial interfaces. However, we don't
# need to support iterator overloads between interface and
# partial interface definitions.
# http://heycam.github.io/webidl/#idl-overloading
if (not interface.is_partial
and (interface.iterable or interface.maplike or interface.setlike
or 'Iterable' in extended_attributes)):
used_extended_attributes = {}
if interface.iterable:
used_extended_attributes.update(interface.iterable.extended_attributes)
elif interface.maplike:
used_extended_attributes.update(interface.maplike.extended_attributes)
elif interface.setlike:
used_extended_attributes.update(interface.setlike.extended_attributes)
if 'RaisesException' in used_extended_attributes:
raise ValueError('[RaisesException] is implied for iterable<>/maplike<>/setlike<>')
if 'CallWith' in used_extended_attributes:
raise ValueError('[CallWith=ScriptState] is implied for iterable<>/maplike<>/setlike<>')
used_extended_attributes.update({
'RaisesException': None,
'CallWith': 'ScriptState',
})
forEach_extended_attributes = used_extended_attributes.copy()
forEach_extended_attributes.update({
'CallWith': ['ScriptState', 'ThisValue'],
})
def generated_iterator_method(name, implemented_as=None):
return generated_method(
return_type=IdlType('Iterator'),
name=name,
extended_attributes=used_extended_attributes,
implemented_as=implemented_as)
iterator_method = generated_iterator_method('iterator', implemented_as='iterator')
if interface.iterable or interface.maplike or interface.setlike:
implicit_methods = [
generated_iterator_method('keys'),
generated_iterator_method('values'),
generated_iterator_method('entries'),
# void forEach(Function callback, [Default=Undefined] optional any thisArg)
generated_method(IdlType('void'), 'forEach',
arguments=[generated_argument(IdlType('Function'), 'callback'),
generated_argument(IdlType('any'), 'thisArg',
is_optional=True,
extended_attributes={'Default': 'Undefined'})],
extended_attributes=forEach_extended_attributes),
]
if interface.maplike:
key_argument = generated_argument(interface.maplike.key_type, 'key')
value_argument = generated_argument(interface.maplike.value_type, 'value')
implicit_methods.extend([
generated_method(IdlType('boolean'), 'has',
arguments=[key_argument],
extended_attributes=used_extended_attributes),
generated_method(IdlType('any'), 'get',
arguments=[key_argument],
extended_attributes=used_extended_attributes),
])
if not interface.maplike.is_read_only:
implicit_methods.extend([
generated_method(IdlType('void'), 'clear',
extended_attributes=used_extended_attributes),
generated_method(IdlType('boolean'), 'delete',
arguments=[key_argument],
extended_attributes=used_extended_attributes),
generated_method(IdlType(interface.name), 'set',
arguments=[key_argument, value_argument],
extended_attributes=used_extended_attributes),
])
if interface.setlike:
value_argument = generated_argument(interface.setlike.value_type, 'value')
implicit_methods.extend([
generated_method(IdlType('boolean'), 'has',
arguments=[value_argument],
extended_attributes=used_extended_attributes),
])
if not interface.setlike.is_read_only:
implicit_methods.extend([
generated_method(IdlType(interface.name), 'add',
arguments=[value_argument],
extended_attributes=used_extended_attributes),
generated_method(IdlType('void'), 'clear',
extended_attributes=used_extended_attributes),
generated_method(IdlType('boolean'), 'delete',
arguments=[value_argument],
extended_attributes=used_extended_attributes),
])
methods_by_name = {}
for method in methods:
methods_by_name.setdefault(method['name'], []).append(method)
for implicit_method in implicit_methods:
if implicit_method['name'] in methods_by_name:
# FIXME: Check that the existing method is compatible.
continue
methods.append(implicit_method)
# FIXME: maplike<> and setlike<> should also imply the presence of a
# 'size' attribute.
# Stringifier
if interface.stringifier:
stringifier = interface.stringifier
stringifier_ext_attrs = stringifier.extended_attributes.copy()
if stringifier.attribute:
stringifier_ext_attrs['ImplementedAs'] = stringifier.attribute.name
elif stringifier.operation:
stringifier_ext_attrs['ImplementedAs'] = stringifier.operation.name
methods.append(generated_method(
return_type=IdlType('DOMString'),
name='toString',
extended_attributes=stringifier_ext_attrs,
implemented_as='toString'))
conditionally_enabled_methods = []
custom_registration_methods = []
method_configuration_methods = []
for method in methods:
# Skip all but one method in each set of overloaded methods.
if 'overload_index' in method and 'overloads' not in method:
continue
if 'overloads' in method:
overloads = method['overloads']
if not overloads['visible']:
continue
# original interface will register instead of partial interface.
if overloads['has_partial_overloads'] and interface.is_partial:
continue
per_context_enabled_function = overloads['per_context_enabled_function_all']
conditionally_exposed_function = overloads['exposed_test_all']
runtime_enabled_function = overloads['runtime_enabled_function_all']
has_custom_registration = (overloads['has_custom_registration_all'] or
overloads['runtime_determined_lengths'])
else:
if not method['visible']:
continue
per_context_enabled_function = method['per_context_enabled_function']
conditionally_exposed_function = method['exposed_test']
runtime_enabled_function = method['runtime_enabled_function']
has_custom_registration = method['has_custom_registration']
if has_custom_registration:
custom_registration_methods.append(method)
continue
if per_context_enabled_function or conditionally_exposed_function:
conditionally_enabled_methods.append(method)
continue
if runtime_enabled_function:
custom_registration_methods.append(method)
continue
if method['should_be_exposed_to_script']:
method_configuration_methods.append(method)
for method in methods:
# The value of the Function object’s “length” property is a Number
# determined as follows:
# 1. Let S be the effective overload set for regular operations (if the
# operation is a regular operation) or for static operations (if the
# operation is a static operation) with identifier id on interface I and
# with argument count 0.
# 2. Return the length of the shortest argument list of the entries in S.
# FIXME: This calculation doesn't take into account whether runtime
# enabled overloads are actually enabled, so length may be incorrect.
# E.g., [RuntimeEnabled=Foo] void f(); void f(long x);
# should have length 1 if Foo is not enabled, but length 0 if it is.
method['length'] = (method['overloads']['minarg'] if 'overloads' in method else
method['number_of_required_arguments'])
context.update({
'conditionally_enabled_methods': conditionally_enabled_methods,
'custom_registration_methods': custom_registration_methods,
'has_origin_safe_method_setter': any(
method['is_check_security_for_frame'] and not method['is_read_only']
for method in methods),
'has_private_script': any(attribute['is_implemented_in_private_script'] for attribute in attributes) or
any(method['is_implemented_in_private_script'] for method in methods),
'iterator_method': iterator_method,
'method_configuration_methods': method_configuration_methods,
'methods': methods,
})
context.update({
'indexed_property_getter': property_getter(interface.indexed_property_getter, ['index']),
'indexed_property_setter': property_setter(interface.indexed_property_setter, interface),
'indexed_property_deleter': property_deleter(interface.indexed_property_deleter),
'is_override_builtins': 'OverrideBuiltins' in extended_attributes,
'named_property_getter': property_getter(interface.named_property_getter, ['propertyName']),
'named_property_setter': property_setter(interface.named_property_setter, interface),
'named_property_deleter': property_deleter(interface.named_property_deleter),
})
return context
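# Hedged illustration (not from the original source): for a non-read-only
# setlike<DOMString> declaration, the iterable/maplike/setlike handling above
# synthesizes keys/values/entries/forEach, an @@iterator method, and
# has/add/clear/delete, unless the IDL already declares methods with those
# names.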
# [DeprecateAs], [Reflect], [RuntimeEnabled]
def constant_context(constant, interface):
extended_attributes = constant.extended_attributes
return {
'cpp_class': extended_attributes.get('PartialInterfaceImplementedAs'),
'deprecate_as': v8_utilities.deprecate_as(constant), # [DeprecateAs]
'idl_type': constant.idl_type.name,
'measure_as': v8_utilities.measure_as(constant, interface), # [MeasureAs]
'name': constant.name,
# FIXME: use 'reflected_name' as correct 'name'
'reflected_name': extended_attributes.get('Reflect', constant.name),
'runtime_enabled_function': runtime_enabled_function_name(constant),
'value': constant.value,
}
################################################################################
# Overloads
################################################################################
def compute_method_overloads_context(interface, methods):
# Regular methods
compute_method_overloads_context_by_type(
interface, [method for method in methods if not method['is_static']])
# Static methods
compute_method_overloads_context_by_type(
interface, [method for method in methods if method['is_static']])
def compute_method_overloads_context_by_type(interface, methods):
"""Computes |method.overload*| template values.
Called separately for static and non-static (regular) methods,
as these are overloaded separately.
Modifies |method| in place for |method| in |methods|.
Doesn't change the |methods| list itself (only the values, i.e. individual
methods), so ok to treat these separately.
"""
# Add overload information only to overloaded methods, so template code can
# easily verify if a function is overloaded
for name, overloads in method_overloads_by_name(methods):
# Resolution function is generated after last overloaded function;
# package necessary information into |method.overloads| for that method.
overloads[-1]['overloads'] = overloads_context(interface, overloads)
overloads[-1]['overloads']['name'] = name
def method_overloads_by_name(methods):
"""Returns generator of overloaded methods by name: [name, [method]]"""
# Filter to only methods that are actually overloaded
method_counts = Counter(method['name'] for method in methods)
overloaded_method_names = set(name
for name, count in method_counts.iteritems()
if count > 1)
overloaded_methods = [method for method in methods
if method['name'] in overloaded_method_names]
# Group by name (generally will be defined together, but not necessarily)
return sort_and_groupby(overloaded_methods, itemgetter('name'))
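# Hedged illustration: given method contexts named ['item', 'item',
# 'namedItem'], only the two 'item' entries survive the Counter filter and the
# generator yields a single group of the form ('item', [<item #1>, <item #2>]).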
def overloads_context(interface, overloads):
"""Returns |overloads| template values for a single name.
Sets |method.overload_index| in place for |method| in |overloads|
and returns dict of overall overload template values.
"""
assert len(overloads) > 1 # only apply to overloaded names
for index, method in enumerate(overloads, 1):
method['overload_index'] = index
effective_overloads_by_length = effective_overload_set_by_length(overloads)
lengths = [length for length, _ in effective_overloads_by_length]
name = overloads[0].get('name', '<constructor>')
# Check if all overloads with the shortest acceptable arguments list are
# runtime enabled, in which case we need to have a runtime determined
# Function.length. The exception is if all overloads are controlled by the
# same runtime enabled feature, in which case there would be no function
# object at all if it is not enabled.
shortest_overloads = effective_overloads_by_length[0][1]
if (all(method.get('runtime_enabled_function')
for method, _, _ in shortest_overloads) and
not common_value(overloads, 'runtime_enabled_function')):
# Generate a list of (length, runtime_enabled_functions) tuples.
runtime_determined_lengths = []
for length, effective_overloads in effective_overloads_by_length:
runtime_enabled_functions = set(
method['runtime_enabled_function']
for method, _, _ in effective_overloads
if method.get('runtime_enabled_function'))
if not runtime_enabled_functions:
# This "length" is unconditionally enabled, so stop here.
runtime_determined_lengths.append((length, [None]))
break
runtime_determined_lengths.append(
(length, sorted(runtime_enabled_functions)))
length = ('%sV8Internal::%sMethodLength()'
% (cpp_name_or_partial(interface), name))
else:
runtime_determined_lengths = None
length = lengths[0]
# Check and fail if overloads disagree on any of the extended attributes
# that affect how the method should be registered.
# Skip the check for overloaded constructors, since they don't support any
# of the extended attributes in question.
if not overloads[0].get('is_constructor'):
overload_extended_attributes = [
method['custom_registration_extended_attributes']
for method in overloads]
for extended_attribute in v8_methods.CUSTOM_REGISTRATION_EXTENDED_ATTRIBUTES:
if common_key(overload_extended_attributes, extended_attribute) is None:
raise ValueError('Overloads of %s have conflicting extended attribute %s'
% (name, extended_attribute))
# Check and fail if overloads disagree about whether the return type
# is a Promise or not.
promise_overload_count = sum(1 for method in overloads if method.get('returns_promise'))
if promise_overload_count not in (0, len(overloads)):
raise ValueError('Overloads of %s have conflicting Promise/non-Promise types'
% (name))
has_overload_visible = False
has_overload_not_visible = False
for overload in overloads:
if overload.get('visible', True):
# If there exists an overload which is visible, we need to generate
# overload resolution, i.e. has_overload_visible should be True.
has_overload_visible = True
else:
has_overload_not_visible = True
# If some overloads are not visible and others are visible,
# the method is overloaded between core and modules.
has_partial_overloads = has_overload_visible and has_overload_not_visible
return {
'deprecate_all_as': common_value(overloads, 'deprecate_as'), # [DeprecateAs]
'exposed_test_all': common_value(overloads, 'exposed_test'), # [Exposed]
'has_custom_registration_all': common_value(overloads, 'has_custom_registration'),
'length': length,
'length_tests_methods': length_tests_methods(effective_overloads_by_length),
# 1. Let maxarg be the length of the longest type list of the
# entries in S.
'maxarg': lengths[-1],
'measure_all_as': common_value(overloads, 'measure_as'), # [MeasureAs]
'minarg': lengths[0],
'per_context_enabled_function_all': common_value(overloads, 'per_context_enabled_function'), # [PerContextEnabled]
'returns_promise_all': promise_overload_count > 0,
'runtime_determined_lengths': runtime_determined_lengths,
'runtime_enabled_function_all': common_value(overloads, 'runtime_enabled_function'), # [RuntimeEnabled]
'valid_arities': lengths
# Only need to report valid arities if there is a gap in the
# sequence of possible lengths, otherwise invalid length means
# "not enough arguments".
if lengths[-1] - lengths[0] != len(lengths) - 1 else None,
'visible': has_overload_visible,
'has_partial_overloads': has_partial_overloads,
}
def effective_overload_set(F):
"""Returns the effective overload set of an overloaded function.
An effective overload set is the set of overloaded functions + signatures
(type list of arguments, with optional and variadic arguments included or
not), and is used in the overload resolution algorithm.
For example, given input [f1(optional long x), f2(DOMString s)], the output
is informally [f1(), f1(long), f2(DOMString)], and formally
[(f1, [], []), (f1, [long], [optional]), (f2, [DOMString], [required])].
Currently the optionality list is a list of |is_optional| booleans (True
means optional, False means required); to support variadics this needs to
be tri-valued as required, optional, or variadic.
Formally:
An effective overload set represents the allowable invocations for a
particular operation, constructor (specified with [Constructor] or
[NamedConstructor]), legacy caller or callback function.
An additional argument N (argument count) is needed when overloading
variadics, but we don't use that currently.
Spec: http://heycam.github.io/webidl/#dfn-effective-overload-set
Formally the input and output lists are sets, but methods are stored
internally as dicts, which can't be stored in a set because they are not
hashable, so we use lists instead.
Arguments:
F: list of overloads for a given callable name.
Returns:
S: list of tuples of the form (callable, type list, optionality list).
"""
# Code closely follows the algorithm in the spec, for clarity and
# correctness, and hence is not very Pythonic.
# 1. Initialize S to ∅.
# (We use a list because we can't use a set, as noted above.)
S = []
# 2. Let F be a set with elements as follows, according to the kind of
# effective overload set:
# (Passed as argument, nothing to do.)
# 3. & 4. (maxarg, m) are only needed for variadics, not used.
# 5. For each operation, extended attribute or callback function X in F:
for X in F: # X is the "callable", F is the overloads.
arguments = X['arguments']
# 1. Let n be the number of arguments X is declared to take.
n = len(arguments)
# 2. Let t0..n−1 be a list of types, where ti is the type of X’s
# argument at index i.
# (“type list”)
t = tuple(argument['idl_type_object'] for argument in arguments)
# 3. Let o0..n−1 be a list of optionality values, where oi is “variadic”
# if X’s argument at index i is a final, variadic argument, “optional”
# if the argument is optional, and “required” otherwise.
# (“optionality list”)
# (We’re just using a boolean for optional/variadic vs. required.)
o = tuple(argument['is_optional'] or argument['is_variadic']
for argument in arguments)
# 4. Add to S the tuple <X, t0..n−1, o0..n−1>.
S.append((X, t, o))
# 5. If X is declared to be variadic, then:
# (Not used, so not implemented.)
# 6. Initialize i to n−1.
i = n - 1
# 7. While i ≥ 0:
# Spec bug (fencepost error); should be “While i > 0:”
# https://www.w3.org/Bugs/Public/show_bug.cgi?id=25590
while i > 0:
# 1. If argument i of X is not optional, then break this loop.
if not o[i]:
break
# 2. Otherwise, add to S the tuple <X, t0..i−1, o0..i−1>.
S.append((X, t[:i], o[:i]))
# 3. Set i to i−1.
i = i - 1
# 8. If n > 0 and all arguments of X are optional, then add to S the
# tuple <X, (), ()> (where “()” represents the empty list).
if n > 0 and all(oi for oi in o):
S.append((X, [], []))
# 6. The effective overload set is S.
return S
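def _demo_effective_overload_set():
    """Illustrative sketch only (hypothetical; not used by the generator):
    reproduces the docstring's example with plain strings standing in for
    IdlType objects, which are only compared for equality here.
    """
    f1 = {'arguments': [{'idl_type_object': 'long',
                         'is_optional': True, 'is_variadic': False}]}
    f2 = {'arguments': [{'idl_type_object': 'DOMString',
                         'is_optional': False, 'is_variadic': False}]}
    assert effective_overload_set([f1, f2]) == [
        (f1, ('long',), (True,)),
        (f1, [], []),
        (f2, ('DOMString',), (False,))]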
def effective_overload_set_by_length(overloads):
def type_list_length(entry):
# Entries in the effective overload set are 3-tuples:
# (callable, type list, optionality list)
return len(entry[1])
effective_overloads = effective_overload_set(overloads)
return list(sort_and_groupby(effective_overloads, type_list_length))
def distinguishing_argument_index(entries):
"""Returns the distinguishing argument index for a sequence of entries.
Entries are elements of the effective overload set with the same number
of arguments (formally, same type list length), each a 3-tuple of the form
(callable, type list, optionality list).
Spec: http://heycam.github.io/webidl/#dfn-distinguishing-argument-index
If there is more than one entry in an effective overload set that has a
given type list length, then for those entries there must be an index i
such that for each pair of entries the types at index i are
distinguishable.
The lowest such index is termed the distinguishing argument index for the
entries of the effective overload set with the given type list length.
"""
# Only applicable “If there is more than one entry”
assert len(entries) > 1
type_lists = [tuple(idl_type.name for idl_type in entry[1])
for entry in entries]
type_list_length = len(type_lists[0])
# Only applicable for entries that “[have] a given type list length”
assert all(len(type_list) == type_list_length for type_list in type_lists)
name = entries[0][0].get('name', 'Constructor') # for error reporting
# The spec defines the distinguishing argument index by conditions it must
# satisfy, but does not give an algorithm.
#
# We compute the distinguishing argument index by first computing the
# minimum index where not all types are the same, and then checking that
# all types in this position are distinguishable (and the optionality lists
# up to this point are identical), since "minimum index where not all types
# are the same" is a *necessary* condition, and more direct to check than
# distinguishability.
types_by_index = (set(types) for types in zip(*type_lists))
try:
# “In addition, for each index j, where j is less than the
# distinguishing argument index for a given type list length, the types
# at index j in all of the entries’ type lists must be the same”
index = next(i for i, types in enumerate(types_by_index)
if len(types) > 1)
except StopIteration:
raise ValueError('No distinguishing index found for %s, length %s:\n'
'All entries have the same type list:\n'
'%s' % (name, type_list_length, type_lists[0]))
# Check optionality
# “and the booleans in the corresponding list indicating argument
# optionality must be the same.”
# FIXME: spec typo: optionality value is no longer a boolean
# https://www.w3.org/Bugs/Public/show_bug.cgi?id=25628
initial_optionality_lists = set(entry[2][:index] for entry in entries)
if len(initial_optionality_lists) > 1:
raise ValueError(
'Invalid optionality lists for %s, length %s:\n'
'Optionality lists differ below distinguishing argument index %s:\n'
'%s'
% (name, type_list_length, index, set(initial_optionality_lists)))
# Check distinguishability
# http://heycam.github.io/webidl/#dfn-distinguishable
# Use names to check for distinct types, since objects are distinct
# FIXME: check distinguishability more precisely, for validation
distinguishing_argument_type_names = [type_list[index]
for type_list in type_lists]
if (len(set(distinguishing_argument_type_names)) !=
len(distinguishing_argument_type_names)):
raise ValueError('Types in distinguishing argument are not distinct:\n'
'%s' % distinguishing_argument_type_names)
return index
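def _demo_distinguishing_argument_index():
    """Illustrative sketch only (hypothetical): both entries have 'long' at
    index 0 and first differ at index 1, so the distinguishing argument index
    is 1. MockType stands in for IdlType; only its |name| is used here.
    """
    from collections import namedtuple
    MockType = namedtuple('MockType', ['name'])
    long_type, string_type = MockType('long'), MockType('DOMString')
    entries = [
        ({'name': 'f'}, (long_type, long_type), (False, False)),
        ({'name': 'f'}, (long_type, string_type), (False, False)),
    ]
    assert distinguishing_argument_index(entries) == 1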
def length_tests_methods(effective_overloads_by_length):
"""Returns sorted list of resolution tests and associated methods, by length.
This builds the main data structure for the overload resolution loop.
For a given argument length, bindings test argument at distinguishing
argument index, in order given by spec: if it is compatible with
(optionality or) type required by an overloaded method, resolve to that
method.
Returns:
[(length, [(test, method)])]
"""
return [(length, list(resolution_tests_methods(effective_overloads)))
for length, effective_overloads in effective_overloads_by_length]
def resolution_tests_methods(effective_overloads):
"""Yields resolution test and associated method, in resolution order, for effective overloads of a given length.
This is the heart of the resolution algorithm.
http://heycam.github.io/webidl/#dfn-overload-resolution-algorithm
Note that a given method can be listed multiple times, with different tests!
This is to handle implicit type conversion.
    Yields:
        (test, method) pairs, in resolution order.
"""
methods = [effective_overload[0]
for effective_overload in effective_overloads]
if len(methods) == 1:
# If only one method with a given length, no test needed
yield 'true', methods[0]
return
# 6. If there is more than one entry in S, then set d to be the
# distinguishing argument index for the entries of S.
index = distinguishing_argument_index(effective_overloads)
# (7-9 are for handling |undefined| values for optional arguments before
# the distinguishing argument (as “missing”), so you can specify only some
# optional arguments. We don’t support this, so we skip these steps.)
# 10. If i = d, then:
# (d is the distinguishing argument index)
# 1. Let V be argi.
# Note: This is the argument that will be used to resolve which
# overload is selected.
cpp_value = 'info[%s]' % index
# Extract argument and IDL type to simplify accessing these in each loop.
arguments = [method['arguments'][index] for method in methods]
arguments_methods = zip(arguments, methods)
idl_types = [argument['idl_type_object'] for argument in arguments]
idl_types_methods = zip(idl_types, methods)
# We can’t do a single loop through all methods or simply sort them, because
# a method may be listed in multiple steps of the resolution algorithm, and
# which test to apply differs depending on the step.
#
# Instead, we need to go through all methods at each step, either finding
# first match (if only one test is allowed) or filtering to matches (if
    # multiple tests are allowed), and generating the appropriate test.
# 2. If V is undefined, and there is an entry in S whose list of
# optionality values has “optional” at index i, then remove from S all
# other entries.
try:
method = next(method for argument, method in arguments_methods
if argument['is_optional'])
test = '%s->IsUndefined()' % cpp_value
yield test, method
except StopIteration:
pass
# 3. Otherwise: if V is null or undefined, and there is an entry in S that
# has one of the following types at position i of its type list,
# • a nullable type
try:
method = next(method for idl_type, method in idl_types_methods
if idl_type.is_nullable)
test = 'isUndefinedOrNull(%s)' % cpp_value
yield test, method
except StopIteration:
pass
# 4. Otherwise: if V is a platform object – but not a platform array
# object – and there is an entry in S that has one of the following
# types at position i of its type list,
# • an interface type that V implements
# (Unlike most of these tests, this can return multiple methods, since we
# test if it implements an interface. Thus we need a for loop, not a next.)
# (We distinguish wrapper types from built-in interface types.)
for idl_type, method in ((idl_type, method)
for idl_type, method in idl_types_methods
if idl_type.is_wrapper_type):
test = 'V8{idl_type}::hasInstance({cpp_value}, info.GetIsolate())'.format(idl_type=idl_type.base_type, cpp_value=cpp_value)
yield test, method
# 13. Otherwise: if IsCallable(V) is true, and there is an entry in S that
# has one of the following types at position i of its type list,
# • a callback function type
# ...
#
# FIXME:
# We test for functions rather than callability, which isn't strictly the
# same thing.
try:
method = next(method for idl_type, method in idl_types_methods
if idl_type.is_callback_function)
test = '%s->IsFunction()' % cpp_value
yield test, method
except StopIteration:
pass
# 14. Otherwise: if V is any kind of object except for a native Date object,
# a native RegExp object, and there is an entry in S that has one of the
# following types at position i of its type list,
# • a sequence type
# ...
#
# 15. Otherwise: if V is any kind of object except for a native Date object,
# a native RegExp object, and there is an entry in S that has one of the
# following types at position i of its type list,
# • an array type
# ...
# • a dictionary
#
# FIXME:
# We don't strictly follow the algorithm here. The algorithm says "remove
# all other entries" if there is "one entry" matching, but we yield all
# entries to support following constructors:
# [constructor(sequence<DOMString> arg), constructor(Dictionary arg)]
# interface I { ... }
# (Need to check array types before objects because an array is an object)
for idl_type, method in idl_types_methods:
if idl_type.native_array_element_type:
# (We test for Array instead of generic Object to type-check.)
# FIXME: test for Object during resolution, then have type check for
# Array in overloaded method: http://crbug.com/262383
yield '%s->IsArray()' % cpp_value, method
for idl_type, method in idl_types_methods:
if idl_type.is_dictionary or idl_type.name == 'Dictionary':
# FIXME: should be '{1}->IsObject() && !{1}->IsDate() && !{1}->IsRegExp()'.format(cpp_value)
# FIXME: the IsDate and IsRegExp checks can be skipped if we've
# already generated tests for them.
yield '%s->IsObject()' % cpp_value, method
# (Check for exact type matches before performing automatic type conversion;
# only needed if distinguishing between primitive types.)
    primitive_types = [idl_type for idl_type in idl_types
                       if idl_type.is_primitive_type]
    if len(primitive_types) > 1:
# (Only needed if match in step 11, otherwise redundant.)
if any(idl_type.is_string_type or idl_type.is_enum
for idl_type in idl_types):
# 10. Otherwise: if V is a Number value, and there is an entry in S
# that has one of the following types at position i of its type
# list,
# • a numeric type
try:
method = next(method for idl_type, method in idl_types_methods
if idl_type.is_numeric_type)
test = '%s->IsNumber()' % cpp_value
yield test, method
except StopIteration:
pass
# (Perform automatic type conversion, in order. If any of these match,
# that’s the end, and no other tests are needed.) To keep this code simple,
# we rely on the C++ compiler's dead code elimination to deal with the
# redundancy if both cases below trigger.
# 11. Otherwise: if there is an entry in S that has one of the following
# types at position i of its type list,
# • DOMString
# • ByteString
# • USVString
# • an enumeration type
try:
method = next(method for idl_type, method in idl_types_methods
if idl_type.is_string_type or idl_type.is_enum)
yield 'true', method
except StopIteration:
pass
# 12. Otherwise: if there is an entry in S that has one of the following
# types at position i of its type list,
# • a numeric type
try:
method = next(method for idl_type, method in idl_types_methods
if idl_type.is_numeric_type)
yield 'true', method
except StopIteration:
pass
################################################################################
# Utility functions
################################################################################
def Counter(iterable):
    # Counts occurrences of each item. Once we require Python 2.7, use
    # collections.Counter instead.
    counter = defaultdict(int)
    for item in iterable:
        counter[item] += 1
    return counter
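# e.g. Counter(['a', 'b', 'a']) == {'a': 2, 'b': 1} (returned as a defaultdict).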
def common(dicts, f):
"""Returns common result of f across an iterable of dicts, or None.
Call f for each dict and return its result if the same across all dicts.
"""
values = (f(d) for d in dicts)
first_value = next(values)
if all(value == first_value for value in values):
return first_value
return None
def common_key(dicts, key):
"""Returns common presence of a key across an iterable of dicts, or None.
True if all dicts have the key, False if none of the dicts have the key,
and None if some but not all dicts have the key.
"""
return common(dicts, lambda d: key in d)
def common_value(dicts, key):
"""Returns common value of a key across an iterable of dicts, or None.
Auxiliary function for overloads, so can consolidate an extended attribute
that appears with the same value on all items in an overload set.
"""
return common(dicts, lambda d: d.get(key))
def sort_and_groupby(l, key=None):
"""Returns a generator of (key, list), sorting and grouping list by key."""
l.sort(key=key)
return ((k, list(g)) for k, g in itertools.groupby(l, key))
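def _demo_utility_helpers():
    """Illustrative sketch only (hypothetical dicts and values) of the
    helpers above.
    """
    dicts = [{'measure_as': 'FeatureA'}, {'measure_as': 'FeatureA'}]
    assert common_value(dicts, 'measure_as') == 'FeatureA'
    assert common_key(dicts, 'measure_as') is True
    assert common_key([{'a': 1}, {}], 'a') is None  # some, but not all
    assert (list(sort_and_groupby([3, 1, 1, 2])) ==
            [(1, [1, 1]), (2, [2]), (3, [3])])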
################################################################################
# Constructors
################################################################################
# [Constructor]
def constructor_context(interface, constructor):
# [RaisesException=Constructor]
is_constructor_raises_exception = \
interface.extended_attributes.get('RaisesException') == 'Constructor'
return {
'arguments': [v8_methods.argument_context(interface, constructor, argument, index)
for index, argument in enumerate(constructor.arguments)],
'cpp_type': cpp_template_type(
cpp_ptr_type('RefPtr', 'RawPtr', gc_type(interface)),
cpp_name(interface)),
'cpp_value': v8_methods.cpp_value(
interface, constructor, len(constructor.arguments)),
'has_exception_state':
is_constructor_raises_exception or
any(argument for argument in constructor.arguments
if argument.idl_type.name == 'SerializedScriptValue' or
argument.idl_type.v8_conversion_needs_exception_state),
'is_call_with_document':
# [ConstructorCallWith=Document]
has_extended_attribute_value(interface,
'ConstructorCallWith', 'Document'),
'is_call_with_execution_context':
# [ConstructorCallWith=ExecutionContext]
has_extended_attribute_value(interface,
'ConstructorCallWith', 'ExecutionContext'),
'is_call_with_script_state':
# [ConstructorCallWith=ScriptState]
has_extended_attribute_value(
interface, 'ConstructorCallWith', 'ScriptState'),
'is_constructor': True,
'is_named_constructor': False,
'is_raises_exception': is_constructor_raises_exception,
'number_of_required_arguments':
number_of_required_arguments(constructor),
}
# [NamedConstructor]
def named_constructor_context(interface):
extended_attributes = interface.extended_attributes
if 'NamedConstructor' not in extended_attributes:
return None
# FIXME: parser should return named constructor separately;
# included in constructors (and only name stored in extended attribute)
# for Perl compatibility
idl_constructor = interface.constructors[-1]
assert idl_constructor.name == 'NamedConstructor'
context = constructor_context(interface, idl_constructor)
context.update({
'name': extended_attributes['NamedConstructor'],
'is_named_constructor': True,
})
return context
def number_of_required_arguments(constructor):
return len([argument for argument in constructor.arguments
if not argument.is_optional])
def interface_length(interface, constructors):
# Docs: http://heycam.github.io/webidl/#es-interface-call
if 'EventConstructor' in interface.extended_attributes:
return 1
if not constructors:
return 0
return min(constructor['number_of_required_arguments']
for constructor in constructors)
################################################################################
# Special operations (methods)
# http://heycam.github.io/webidl/#idl-special-operations
################################################################################
def property_getter(getter, cpp_arguments):
if not getter:
return None
def is_null_expression(idl_type):
if idl_type.use_output_parameter_for_result:
return 'result.isNull()'
if idl_type.is_string_type:
return 'result.isNull()'
if idl_type.is_interface_type:
return '!result'
if idl_type.base_type in ('any', 'object'):
return 'result.isEmpty()'
return ''
idl_type = getter.idl_type
extended_attributes = getter.extended_attributes
is_call_with_script_state = v8_utilities.has_extended_attribute_value(getter, 'CallWith', 'ScriptState')
is_raises_exception = 'RaisesException' in extended_attributes
use_output_parameter_for_result = idl_type.use_output_parameter_for_result
# FIXME: make more generic, so can use v8_methods.cpp_value
cpp_method_name = 'impl->%s' % cpp_name(getter)
if is_call_with_script_state:
cpp_arguments.insert(0, 'scriptState')
if is_raises_exception:
cpp_arguments.append('exceptionState')
if use_output_parameter_for_result:
cpp_arguments.append('result')
cpp_value = '%s(%s)' % (cpp_method_name, ', '.join(cpp_arguments))
return {
'cpp_type': idl_type.cpp_type,
'cpp_value': cpp_value,
'do_not_check_security': 'DoNotCheckSecurity' in extended_attributes,
'is_call_with_script_state': is_call_with_script_state,
'is_custom':
'Custom' in extended_attributes and
(not extended_attributes['Custom'] or
has_extended_attribute_value(getter, 'Custom', 'PropertyGetter')),
'is_custom_property_enumerator': has_extended_attribute_value(
getter, 'Custom', 'PropertyEnumerator'),
'is_custom_property_query': has_extended_attribute_value(
getter, 'Custom', 'PropertyQuery'),
'is_enumerable': 'NotEnumerable' not in extended_attributes,
'is_null_expression': is_null_expression(idl_type),
'is_raises_exception': is_raises_exception,
'name': cpp_name(getter),
'use_output_parameter_for_result': use_output_parameter_for_result,
'v8_set_return_value': idl_type.v8_set_return_value('result', extended_attributes=extended_attributes, script_wrappable='impl', release=idl_type.release),
}
def property_setter(setter, interface):
if not setter:
return None
idl_type = setter.arguments[1].idl_type
extended_attributes = setter.extended_attributes
is_call_with_script_state = v8_utilities.has_extended_attribute_value(setter, 'CallWith', 'ScriptState')
is_raises_exception = 'RaisesException' in extended_attributes
# [TypeChecking=Interface] / [LegacyInterfaceTypeChecking]
has_type_checking_interface = (
not is_legacy_interface_type_checking(interface, setter) and
idl_type.is_wrapper_type)
return {
'has_exception_state': (is_raises_exception or
idl_type.v8_conversion_needs_exception_state),
'has_type_checking_interface': has_type_checking_interface,
'idl_type': idl_type.base_type,
'is_call_with_script_state': is_call_with_script_state,
'is_custom': 'Custom' in extended_attributes,
'is_nullable': idl_type.is_nullable,
'is_raises_exception': is_raises_exception,
'name': cpp_name(setter),
'v8_value_to_local_cpp_value': idl_type.v8_value_to_local_cpp_value(
extended_attributes, 'v8Value', 'propertyValue'),
}
def property_deleter(deleter):
if not deleter:
return None
idl_type = deleter.idl_type
extended_attributes = deleter.extended_attributes
is_call_with_script_state = v8_utilities.has_extended_attribute_value(deleter, 'CallWith', 'ScriptState')
return {
'is_call_with_script_state': is_call_with_script_state,
'is_custom': 'Custom' in extended_attributes,
'is_raises_exception': 'RaisesException' in extended_attributes,
'name': cpp_name(deleter),
}
# ---- end of third_party/WebKit/Source/bindings/scripts/v8_interface.py (repo: sgraham/nope, license: bsd-3-clause) ----
from msrest.serialization import Model
class DiskInstanceView(Model):
"""The instance view of the disk.
:param name: The disk name.
:type name: str
:param statuses: The resource status information.
:type statuses:
list[~azure.mgmt.compute.v2016_03_30.models.InstanceViewStatus]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
}
def __init__(self, **kwargs):
super(DiskInstanceView, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.statuses = kwargs.get('statuses', None)
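# Illustrative usage sketch (hypothetical values; statuses would normally hold
# InstanceViewStatus models returned by the service):
#
#   disk_view = DiskInstanceView(name='datadisk0', statuses=[])
#   assert disk_view.name == 'datadisk0'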
# ---- end of azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/models/disk_instance_view.py (repo: lmazuel/azure-sdk-for-python, license: mit) ----
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2011 Nathanael C. Fritz, Lance J.T. Stout
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import logging
from sleekxmpp.stanza import Message
from sleekxmpp.xmlstream import register_stanza_plugin
from sleekxmpp.xmlstream.handler import Callback
from sleekxmpp.xmlstream.matcher import StanzaPath
from sleekxmpp.plugins import BasePlugin
from sleekxmpp.plugins.xep_0224 import stanza
log = logging.getLogger(__name__)
class XEP_0224(BasePlugin):
"""
XEP-0224: Attention
"""
name = 'xep_0224'
description = 'XEP-0224: Attention'
dependencies = set(['xep_0030'])
stanza = stanza
def plugin_init(self):
"""Start the XEP-0224 plugin."""
register_stanza_plugin(Message, stanza.Attention)
self.xmpp.register_handler(
Callback('Attention',
StanzaPath('message/attention'),
self._handle_attention))
def plugin_end(self):
self.xmpp['xep_0030'].del_feature(feature=stanza.Attention.namespace)
self.xmpp.remove_handler('Attention')
def session_bind(self, jid):
self.xmpp['xep_0030'].add_feature(stanza.Attention.namespace)
def request_attention(self, to, mfrom=None, mbody=''):
"""
Send an attention message with an optional body.
Arguments:
to -- The attention request recipient's JID.
mfrom -- Optionally specify the sender of the attention request.
mbody -- An optional message body to include in the request.
"""
m = self.xmpp.Message()
m['to'] = to
m['type'] = 'headline'
m['attention'] = True
if mfrom:
m['from'] = mfrom
m['body'] = mbody
m.send()
def _handle_attention(self, msg):
"""
Raise an event after receiving a message with an attention request.
Arguments:
msg -- A message stanza with an attention element.
"""
log.debug("Received attention request from: %s", msg['from'])
self.xmpp.event('attention', msg)
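def _demo_request_attention():
    """Illustrative sketch only (hypothetical JID and credentials): register
    the plugin on a client, react to incoming requests, and send one.
    """
    import sleekxmpp
    xmpp = sleekxmpp.ClientXMPP('user@example.com', 'secret')
    xmpp.register_plugin('xep_0224')
    xmpp.add_event_handler(
        'attention',
        lambda msg: log.info('Attention requested by %s', msg['from']))
    if xmpp.connect():
        xmpp.process(block=False)
        xmpp['xep_0224'].request_attention('friend@example.com', mbody='Ping!')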
# ---- end of sleekxmpp/plugins/xep_0224/attention.py (repo: RedbackThomson/LoLShadow, license: mit) ----
"""Prints the size of each given file and optionally computes the size of
libchrome.so without the dependencies added for building with android NDK.
Also breaks down the contents of the APK to determine the installed size
and assign size contributions to different classes of file.
"""
import collections
import json
import operator
import optparse
import os
import re
import sys
import tempfile
import zipfile
import zlib
import devil_chromium
from devil.utils import cmd_helper
from pylib.constants import host_paths
_GRIT_PATH = os.path.join(host_paths.DIR_SOURCE_ROOT, 'tools', 'grit')
with host_paths.SysPath(_GRIT_PATH):
from grit.format import data_pack # pylint: disable=import-error
with host_paths.SysPath(host_paths.BUILD_COMMON_PATH):
import perf_tests_results_helper # pylint: disable=import-error
# Static initializers expected in official builds. Note that this list is built
# using 'nm' on libchrome.so which results from a GCC official build (i.e.
# Clang is not supported currently).
_BASE_CHART = {
'format_version': '0.1',
'benchmark_name': 'resource_sizes',
'benchmark_description': 'APK resource size information.',
'trace_rerun_options': [],
'charts': {}
}
_DUMP_STATIC_INITIALIZERS_PATH = os.path.join(
host_paths.DIR_SOURCE_ROOT, 'tools', 'linux', 'dump-static-initializers.py')
_RC_HEADER_RE = re.compile(r'^#define (?P<name>\w+) (?P<id>\d+)$')
def CountStaticInitializers(so_path):
def get_elf_section_size(readelf_stdout, section_name):
# Matches: .ctors PROGBITS 000000000516add0 5169dd0 000010 00 WA 0 0 8
match = re.search(r'\.%s.*$' % re.escape(section_name),
readelf_stdout, re.MULTILINE)
if not match:
return (False, -1)
size_str = re.split(r'\W+', match.group(0))[5]
return (True, int(size_str, 16))
# Find the number of files with at least one static initializer.
# First determine if we're 32 or 64 bit
stdout = cmd_helper.GetCmdOutput(['readelf', '-h', so_path])
elf_class_line = re.search('Class:.*$', stdout, re.MULTILINE).group(0)
elf_class = re.split(r'\W+', elf_class_line)[1]
if elf_class == 'ELF32':
word_size = 4
else:
word_size = 8
# Then find the number of files with global static initializers.
# NOTE: this is very implementation-specific and makes assumptions
# about how compiler and linker implement global static initializers.
si_count = 0
stdout = cmd_helper.GetCmdOutput(['readelf', '-SW', so_path])
has_init_array, init_array_size = get_elf_section_size(stdout, 'init_array')
if has_init_array:
si_count = init_array_size / word_size
si_count = max(si_count, 0)
return si_count
def GetStaticInitializers(so_path):
output = cmd_helper.GetCmdOutput([_DUMP_STATIC_INITIALIZERS_PATH, '-d',
so_path])
return output.splitlines()
def ReportPerfResult(chart_data, graph_title, trace_title, value, units,
improvement_direction='down', important=True):
"""Outputs test results in correct format.
If chart_data is None, it outputs data in old format. If chart_data is a
dictionary, formats in chartjson format. If any other format defaults to
old format.
"""
if chart_data and isinstance(chart_data, dict):
chart_data['charts'].setdefault(graph_title, {})
chart_data['charts'][graph_title][trace_title] = {
'type': 'scalar',
'value': value,
'units': units,
'improvement_direction': improvement_direction,
'important': important
}
else:
perf_tests_results_helper.PrintPerfResult(
graph_title, trace_title, [value], units)
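def _DemoReportPerfResult():
  """Illustrative sketch only (hypothetical numbers): shows the chartjson
  structure that ReportPerfResult builds up.
  """
  chartjson = {'charts': {}}
  ReportPerfResult(chartjson, 'ResourceSizes', 'base.apk size', 1024, 'bytes')
  assert chartjson['charts']['ResourceSizes']['base.apk size'] == {
      'type': 'scalar', 'value': 1024, 'units': 'bytes',
      'improvement_direction': 'down', 'important': True}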
def PrintResourceSizes(files, chartjson=None):
"""Prints the sizes of each given file.
Args:
files: List of files to print sizes for.
"""
for f in files:
ReportPerfResult(chartjson, 'ResourceSizes', os.path.basename(f) + ' size',
os.path.getsize(f), 'bytes')
def PrintApkAnalysis(apk_filename, chartjson=None):
"""Analyse APK to determine size contributions of different file classes."""
# Define a named tuple type for file grouping.
# name: Human readable name for this file group
# regex: Regular expression to match filename
# extracted: Function that takes a file name and returns whether the file is
# extracted from the apk at install/runtime.
FileGroup = collections.namedtuple('FileGroup',
['name', 'regex', 'extracted'])
# File groups are checked in sequence, so more specific regexes should be
# earlier in the list.
YES = lambda _: True
NO = lambda _: False
FILE_GROUPS = (
FileGroup('Native code', r'\.so$', lambda f: 'crazy' not in f),
FileGroup('Java code', r'\.dex$', YES),
FileGroup('Native resources (no l10n)', r'\.pak$', NO),
# For locale paks, assume only english paks are extracted.
FileGroup('Native resources (l10n)', r'\.lpak$', lambda f: 'en_' in f),
FileGroup('ICU (i18n library) data', r'assets/icudtl\.dat$', NO),
FileGroup('V8 Snapshots', r'\.bin$', NO),
FileGroup('PNG drawables', r'\.png$', NO),
FileGroup('Non-compiled Android resources', r'^res/', NO),
FileGroup('Compiled Android resources', r'\.arsc$', NO),
FileGroup('Package metadata', r'^(META-INF/|AndroidManifest\.xml$)', NO),
FileGroup('Unknown files', r'.', NO),
)
apk = zipfile.ZipFile(apk_filename, 'r')
try:
apk_contents = apk.infolist()
finally:
apk.close()
total_apk_size = os.path.getsize(apk_filename)
apk_basename = os.path.basename(apk_filename)
found_files = {}
for group in FILE_GROUPS:
found_files[group] = []
for member in apk_contents:
for group in FILE_GROUPS:
if re.search(group.regex, member.filename):
found_files[group].append(member)
break
else:
raise KeyError('No group found for file "%s"' % member.filename)
total_install_size = total_apk_size
for group in FILE_GROUPS:
apk_size = sum(member.compress_size for member in found_files[group])
install_size = apk_size
install_bytes = sum(f.file_size for f in found_files[group]
if group.extracted(f.filename))
install_size += install_bytes
total_install_size += install_bytes
ReportPerfResult(chartjson, apk_basename + '_Breakdown',
group.name + ' size', apk_size, 'bytes')
ReportPerfResult(chartjson, apk_basename + '_InstallBreakdown',
group.name + ' size', install_size, 'bytes')
transfer_size = _CalculateCompressedSize(apk_filename)
ReportPerfResult(chartjson, apk_basename + '_InstallSize',
'Estimated installed size', total_install_size, 'bytes')
ReportPerfResult(chartjson, apk_basename + '_InstallSize', 'APK size',
total_apk_size, 'bytes')
ReportPerfResult(chartjson, apk_basename + '_TransferSize',
'Transfer size (deflate)', transfer_size, 'bytes')
def IsPakFileName(file_name):
"""Returns whether the given file name ends with .pak or .lpak."""
return file_name.endswith('.pak') or file_name.endswith('.lpak')
def PrintPakAnalysis(apk_filename, min_pak_resource_size, build_type):
"""Print sizes of all resources in all pak files in |apk_filename|."""
print
print 'Analyzing pak files in %s...' % apk_filename
# A structure for holding details about a pak file.
Pak = collections.namedtuple(
'Pak', ['filename', 'compress_size', 'file_size', 'resources'])
  # Build a list of Pak objects for each pak file.
paks = []
apk = zipfile.ZipFile(apk_filename, 'r')
try:
for i in (x for x in apk.infolist() if IsPakFileName(x.filename)):
with tempfile.NamedTemporaryFile() as f:
f.write(apk.read(i.filename))
f.flush()
paks.append(Pak(i.filename, i.compress_size, i.file_size,
data_pack.DataPack.ReadDataPack(f.name).resources))
finally:
apk.close()
# Output the overall pak file summary.
total_files = len(paks)
total_compress_size = sum(pak.compress_size for pak in paks)
total_file_size = sum(pak.file_size for pak in paks)
print 'Total pak files: %d' % total_files
print 'Total compressed size: %s' % _FormatBytes(total_compress_size)
print 'Total uncompressed size: %s' % _FormatBytes(total_file_size)
print
# Output the table of details about all pak files.
print '%25s%11s%21s%21s' % (
'FILENAME', 'RESOURCES', 'COMPRESSED SIZE', 'UNCOMPRESSED SIZE')
for pak in sorted(paks, key=operator.attrgetter('file_size'), reverse=True):
print '%25s %10s %12s %6.2f%% %12s %6.2f%%' % (
pak.filename,
len(pak.resources),
_FormatBytes(pak.compress_size),
100.0 * pak.compress_size / total_compress_size,
_FormatBytes(pak.file_size),
100.0 * pak.file_size / total_file_size)
print
print 'Analyzing pak resources in %s...' % apk_filename
# Calculate aggregate stats about resources across pak files.
resource_count_map = collections.defaultdict(int)
resource_size_map = collections.defaultdict(int)
resource_overhead_bytes = 6
for pak in paks:
for r in pak.resources:
resource_count_map[r] += 1
resource_size_map[r] += len(pak.resources[r]) + resource_overhead_bytes
# Output the overall resource summary.
total_resource_size = sum(resource_size_map.values())
total_resource_count = len(resource_count_map)
assert total_resource_size <= total_file_size
print 'Total pak resources: %s' % total_resource_count
print 'Total uncompressed resource size: %s' % _FormatBytes(
total_resource_size)
print
resource_id_name_map = _GetResourceIdNameMap(build_type)
# Output the table of details about all resources across pak files.
print
print '%56s %5s %17s' % ('RESOURCE', 'COUNT', 'UNCOMPRESSED SIZE')
for i in sorted(resource_size_map, key=resource_size_map.get,
reverse=True):
if resource_size_map[i] >= min_pak_resource_size:
print '%56s %5s %9s %6.2f%%' % (
resource_id_name_map.get(i, i),
resource_count_map[i],
_FormatBytes(resource_size_map[i]),
100.0 * resource_size_map[i] / total_resource_size)
def _GetResourceIdNameMap(build_type):
"""Returns a map of {resource_id: resource_name}."""
out_dir = os.path.join(host_paths.DIR_SOURCE_ROOT, 'out', build_type)
assert os.path.isdir(out_dir), 'Failed to locate out dir at %s' % out_dir
print 'Looking at resources in: %s' % out_dir
grit_headers = []
for root, _, files in os.walk(out_dir):
if root.endswith('grit'):
grit_headers += [os.path.join(root, f) for f in files if f.endswith('.h')]
assert grit_headers, 'Failed to find grit headers in %s' % out_dir
id_name_map = {}
for header in grit_headers:
with open(header, 'r') as f:
for line in f.readlines():
m = _RC_HEADER_RE.match(line.strip())
if m:
i = int(m.group('id'))
name = m.group('name')
if i in id_name_map and name != id_name_map[i]:
print 'WARNING: Resource ID conflict %s (%s vs %s)' % (
i, id_name_map[i], name)
id_name_map[i] = name
return id_name_map
def PrintStaticInitializersCount(so_with_symbols_path, chartjson=None):
"""Emits the performance result for static initializers found in the provided
shared library. Additionally, files for which static initializers were
found are printed on the standard output.
Args:
so_with_symbols_path: Path to the unstripped libchrome.so file.
"""
  # GetStaticInitializers uses dump-static-initializers.py to get a list of all
# static initializers. This does not work on all archs (particularly arm).
# TODO(rnephew): Get rid of warning when crbug.com/585588 is fixed.
si_count = CountStaticInitializers(so_with_symbols_path)
static_initializers = GetStaticInitializers(so_with_symbols_path)
if si_count != len(static_initializers):
print ('There are %d files with static initializers, but '
'dump-static-initializers found %d:' %
(si_count, len(static_initializers)))
else:
print 'Found %d files with static initializers:' % si_count
print '\n'.join(static_initializers)
ReportPerfResult(chartjson, 'StaticInitializersCount', 'count',
si_count, 'count')
def _FormatBytes(byts):
"""Pretty-print a number of bytes."""
if byts > 2**20.0:
byts /= 2**20.0
return '%.2fm' % byts
if byts > 2**10.0:
byts /= 2**10.0
return '%.2fk' % byts
return str(byts)
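# e.g. (hypothetical values): _FormatBytes(3 * 2 ** 20) == '3.00m' and
# _FormatBytes(512) == '512'.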
def _CalculateCompressedSize(file_path):
CHUNK_SIZE = 256 * 1024
compressor = zlib.compressobj()
total_size = 0
with open(file_path, 'rb') as f:
for chunk in iter(lambda: f.read(CHUNK_SIZE), ''):
total_size += len(compressor.compress(chunk))
total_size += len(compressor.flush())
return total_size
def main(argv):
usage = """Usage: %prog [options] file1 file2 ...
Pass any number of files to graph their sizes. Any files with the extension
'.apk' will be broken down into their components on a separate graph."""
option_parser = optparse.OptionParser(usage=usage)
option_parser.add_option('--so-path', help='Path to libchrome.so.')
option_parser.add_option('--so-with-symbols-path',
help='Path to libchrome.so with symbols.')
option_parser.add_option('--min-pak-resource-size', type='int',
default=20*1024,
help='Minimum byte size of displayed pak resources.')
option_parser.add_option('--build_type', dest='build_type', default='Debug',
help='Sets the build type, default is Debug.')
option_parser.add_option('--chartjson', action="store_true",
help='Sets output mode to chartjson.')
option_parser.add_option('--output-dir', default='.',
help='Directory to save chartjson to.')
option_parser.add_option('-d', '--device',
help='Dummy option for perf runner.')
options, args = option_parser.parse_args(argv)
files = args[1:]
chartjson = _BASE_CHART.copy() if options.chartjson else None
  # For backward compatibility with buildbot scripts, treat --so-path as just
# another file to print the size of. We don't need it for anything special any
# more.
if options.so_path:
files.append(options.so_path)
if not files:
option_parser.error('Must specify a file')
devil_chromium.Initialize()
if options.so_with_symbols_path:
PrintStaticInitializersCount(
options.so_with_symbols_path, chartjson=chartjson)
PrintResourceSizes(files, chartjson=chartjson)
for f in files:
if f.endswith('.apk'):
PrintApkAnalysis(f, chartjson=chartjson)
PrintPakAnalysis(f, options.min_pak_resource_size, options.build_type)
if chartjson:
results_path = os.path.join(options.output_dir, 'results-chart.json')
with open(results_path, 'w') as json_file:
json.dump(chartjson, json_file)
if __name__ == '__main__':
sys.exit(main(sys.argv))
# ---- end of build/android/resource_sizes.py (repo: highweb-project/highweb-webcl-html5spec, license: bsd-3-clause) ----
from copy import deepcopy
from django.contrib import admin
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import NoReverseMatch
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from mezzanine.conf import settings
from mezzanine.core.admin import DisplayableAdmin, DisplayableAdminForm
from mezzanine.pages.models import Page, RichTextPage, Link
from mezzanine.utils.urls import admin_url
page_fieldsets = deepcopy(DisplayableAdmin.fieldsets)
page_fieldsets[0][1]["fields"] += ("in_menus", "login_required",)
class PageAdminForm(DisplayableAdminForm):
def clean_slug(self):
"""
If the slug has been changed, save the old one. We will use it later
in PageAdmin.model_save() to make the slug change propagate down the
page tree.
"""
if self.instance.slug != self.cleaned_data['slug']:
self.instance._old_slug = self.instance.slug
return self.cleaned_data['slug']
class PageAdmin(DisplayableAdmin):
"""
Admin class for the ``Page`` model and all subclasses of
``Page``. Handles redirections between admin interfaces for the
``Page`` model and its subclasses.
"""
form = PageAdminForm
fieldsets = page_fieldsets
change_list_template = "admin/pages/page/change_list.html"
def __init__(self, *args, **kwargs):
"""
For ``Page`` subclasses that are registered with an Admin class
that doesn't implement fieldsets, add any extra model fields
to this instance's fieldsets. This mimics Django's behaviour of
adding all model fields when no fieldsets are defined on the
Admin class.
"""
super(PageAdmin, self).__init__(*args, **kwargs)
# Test that the fieldsets don't differ from PageAdmin's.
if self.model is not Page and self.fieldsets == PageAdmin.fieldsets:
# Make a copy so that we aren't modifying other Admin
# classes' fieldsets.
self.fieldsets = deepcopy(self.fieldsets)
# Insert each field between the publishing fields and nav
# fields. Do so in reverse order to retain the order of
# the model's fields.
exclude_fields = Page._meta.get_all_field_names() + ["page_ptr"]
try:
exclude_fields.extend(self.exclude)
except (AttributeError, TypeError):
pass
try:
exclude_fields.extend(self.form.Meta.exclude)
except (AttributeError, TypeError):
pass
fields = self.model._meta.fields + self.model._meta.many_to_many
for field in reversed(fields):
if field.name not in exclude_fields and field.editable:
self.fieldsets[0][1]["fields"].insert(3, field.name)
def in_menu(self):
"""
Hide subclasses from the admin menu.
"""
return self.model is Page
def _check_permission(self, request, page, permission):
"""
Runs the custom permission check and raises an
exception if False.
"""
if not getattr(page, "can_" + permission)(request):
raise PermissionDenied
def add_view(self, request, **kwargs):
"""
For the ``Page`` model, redirect to the add view for the
first page model, based on the ``ADD_PAGE_ORDER`` setting.
"""
if self.model is Page:
return HttpResponseRedirect(self.get_content_models()[0].add_url)
return super(PageAdmin, self).add_view(request, **kwargs)
def change_view(self, request, object_id, **kwargs):
"""
For the ``Page`` model, check ``page.get_content_model()``
for a subclass and redirect to its admin change view.
Also enforce custom change permissions for the page instance.
"""
page = get_object_or_404(Page, pk=object_id)
content_model = page.get_content_model()
self._check_permission(request, content_model, "change")
if self.model is Page:
if content_model is not None:
change_url = admin_url(content_model.__class__, "change",
content_model.id)
return HttpResponseRedirect(change_url)
kwargs.setdefault("extra_context", {})
kwargs["extra_context"].update({
"hide_delete_link": not content_model.can_delete(request),
"hide_slug_field": content_model.overridden(),
})
return super(PageAdmin, self).change_view(request, object_id, **kwargs)
def delete_view(self, request, object_id, **kwargs):
"""
Enforce custom delete permissions for the page instance.
"""
page = get_object_or_404(Page, pk=object_id)
content_model = page.get_content_model()
self._check_permission(request, content_model, "delete")
return super(PageAdmin, self).delete_view(request, object_id, **kwargs)
def changelist_view(self, request, extra_context=None):
"""
Redirect to the ``Page`` changelist view for ``Page``
subclasses.
"""
if self.model is not Page:
return HttpResponseRedirect(admin_url(Page, "changelist"))
if not extra_context:
extra_context = {}
extra_context["page_models"] = self.get_content_models()
return super(PageAdmin, self).changelist_view(request, extra_context)
def save_model(self, request, obj, form, change):
"""
Set the ID of the parent page if passed in via querystring, and make
sure the new slug propagates to all descendant pages.
"""
if change and hasattr(obj, "_old_slug"):
# _old_slug was set in PageAdminForm.clean_slug().
new_slug = obj.slug
obj.slug = obj._old_slug
obj.set_slug(new_slug)
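            # Illustrative flow (hypothetical slugs): renaming /about to
            # /about-us stashes _old_slug='about', and set_slug('about-us')
            # then rewrites descendants such as /about/team to /about-us/team.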
# Force parent to be saved to trigger handling of ordering and slugs.
parent = request.GET.get("parent")
if parent is not None and not change:
obj.parent_id = parent
obj.save()
super(PageAdmin, self).save_model(request, obj, form, change)
def _maintain_parent(self, request, response):
"""
Maintain the parent ID in the querystring for response_add and
response_change.
"""
location = response._headers.get("location")
parent = request.GET.get("parent")
if parent and location and "?" not in location[1]:
url = "%s?parent=%s" % (location[1], parent)
return HttpResponseRedirect(url)
return response
def response_add(self, request, obj):
"""
Enforce page permissions and maintain the parent ID in the
querystring.
"""
response = super(PageAdmin, self).response_add(request, obj)
return self._maintain_parent(request, response)
def response_change(self, request, obj):
"""
Enforce page permissions and maintain the parent ID in the
querystring.
"""
response = super(PageAdmin, self).response_change(request, obj)
return self._maintain_parent(request, response)
@classmethod
def get_content_models(cls):
"""
Return all Page subclasses that are admin registered, ordered
based on the ``ADD_PAGE_ORDER`` setting.
"""
models = []
for model in Page.get_content_models():
try:
admin_url(model, "add")
except NoReverseMatch:
continue
else:
setattr(model, "name", model._meta.verbose_name)
setattr(model, "add_url", admin_url(model, "add"))
models.append(model)
order = [name.lower() for name in settings.ADD_PAGE_ORDER]
        def sort_key(page):
            name = "%s.%s" % (page._meta.app_label, page._meta.object_name)
            try:
                # Models listed in ADD_PAGE_ORDER sort by their position.
                return (order.index(name.lower()), "")
            except ValueError:
                # Unlisted models sort after them, alphabetically by name.
                return (len(order), page.name)
return sorted(models, key=sort_key)
# Drop the meta data fields, and move slug towards the top.
link_fieldsets = deepcopy(page_fieldsets[:1])
link_fieldsets[0][1]["fields"] = link_fieldsets[0][1]["fields"][:-1]
link_fieldsets[0][1]["fields"].insert(1, "slug")
class LinkAdmin(PageAdmin):
fieldsets = link_fieldsets
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Make slug mandatory.
"""
if db_field.name == "slug":
kwargs["required"] = True
return super(LinkAdmin, self).formfield_for_dbfield(db_field, **kwargs)
def save_form(self, request, form, change):
"""
Don't show links in the sitemap.
"""
obj = form.save(commit=False)
if not obj.id and "in_sitemap" not in form.fields:
obj.in_sitemap = False
return super(LinkAdmin, self).save_form(request, form, change)
admin.site.register(Page, PageAdmin)
admin.site.register(RichTextPage, PageAdmin)
admin.site.register(Link, LinkAdmin)
# ---- end of mezzanine/pages/admin.py (repo: eRestin/Mezz, license: bsd-2-clause) ----
from twilio.rest import Client
# Initialize the client
account = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
token = "your_auth_token"
client = Client(account, token)
members = client.chat \
.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.members \
.list()
for member in members:
print(member.sid)
# ---- end of ip-messaging/rest/members/list-members/list-members.6.x.py (repo: teoreteetik/api-snippets, license: mit) ----
import re
import os
from compose.cli.command import get_project
import compose.cli.command as cmd
from compose.cli.main import TopLevelCommand, perform_command
from compose.cli.utils import get_version_info
from compose.config.config import load
from compose.config.environment import Environment
from compose.const import DEFAULT_TIMEOUT
from compose.project import Project, ProjectNetworks
from compose.service import ConvergenceStrategy, BuildAction, Service
from compose.config.validation import load_jsonschema, get_resolver_path, \
handle_errors, process_config_schema_errors, \
process_service_constraint_errors
from dork_compose.helpers import tru
from functools import partial
from jsonschema import Draft4Validator
from jsonschema import FormatChecker
from jsonschema import RefResolver
import dork_compose
import logging
log = logging.getLogger(__name__)
def dork_get_version_info(scope):
return '%s%sdork-compose version %s, build %s ' % (
get_version_info(scope),
', ' if scope == 'compose' else '\n',
dork_compose.__version__,
dork_get_build_version()
)
def dork_get_build_version():
filename = os.path.join(os.path.dirname(dork_compose.__file__), 'GITSHA')
if not os.path.exists(filename):
return 'unknown'
with open(filename) as fh:
return fh.read().strip()
def dork_perform_command(options, handler, command_options):
if '--timeout' in options and not options['--timeout']:
options['--timeout'] = os.environ.get('DORK_DEFAULT_TIMEOUT', DEFAULT_TIMEOUT)
return perform_command(options, handler, command_options)
def dork_config_load(plugins, config_details):
config_data = load(config_details)
for plugin in plugins:
plugin.preprocess_config(config_data)
return config_data
def get_dork_project(plugins, project_dir, config_path=None, project_name=None,
verbose=False, host=None, tls_config=None,
environment=None, override_dir=None):
cmd.config.load = partial(dork_config_load, plugins)
project = get_project(project_dir, config_path, project_name, verbose, host, tls_config, environment, override_dir)
if 'COMPOSE_PROJECT_NAME' in os.environ:
project.name = os.environ['COMPOSE_PROJECT_NAME']
return DorkProject.from_project(project, plugins)
def get_dork_project_name(working_dir, project_name=None, environment=None):
def normalize_name(name):
# Full copy because compose strips dashes from project names.
return re.sub(r'[^a-z0-9\-]', '', name.lower())
if not environment:
environment = Environment.from_env_file(working_dir)
project_name = project_name or environment.get('COMPOSE_PROJECT_NAME')
if project_name:
return normalize_name(project_name)
project = os.path.basename(os.path.abspath(working_dir))
if project:
return normalize_name(project)
return 'default'
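# e.g. (hypothetical): a compose directory named "My_Project-1" yields the
# project name "myproject-1".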
def dork_validate_service_constraints(plugins, config, service_name, version):
def handler(errors):
return process_service_constraint_errors(errors, service_name, version)
schema = load_jsonschema(version)
for plugin in plugins:
plugin.alter_config_schema(schema)
validator = Draft4Validator(schema['definitions']['constraints']['service'])
handle_errors(validator.iter_errors(config), handler, None)
def dork_validate_against_config_schema(plugins, config_file):
schema = load_jsonschema(config_file)
for plugin in plugins:
plugin.alter_config_schema(schema)
format_checker = FormatChecker(["ports", "expose"])
validator = Draft4Validator(
schema,
# TODO: wait for fix in docker-compose
# docker-compose does not append filename, therefore caches
# always miss.
resolver=RefResolver(get_resolver_path() + "config_schema_v{0}.json".format(config_file.version), schema),
format_checker=format_checker)
handle_errors(
validator.iter_errors(config_file.config),
process_config_schema_errors,
config_file.filename)
class DorkTopLevelCommand(TopLevelCommand):
__doc__ = TopLevelCommand.__doc__ + "\n".join([
" snapshot Save or restore runtime data snapshots.",
" info Display information about services."
])
def __init__(self, project, project_dir='.'):
super(DorkTopLevelCommand, self).__init__(project, project_dir)
def snapshot(self, options):
"""
Save or restore volume snapshots.
Usage: snapshot COMMAND [SNAPSHOTS...]
Commands:
save Store volumes state as snapshot.
load Load the closest snapshot or a specific one.
ls List all available snapshots.
rm Clean up snapshots or remove a specific one.
"""
getattr(self.project, 'snapshot_' + options['COMMAND'])(options['SNAPSHOTS'])
def info(self, options):
"""
Display service status information.
Usage: info
"""
from terminaltables import AsciiTable
rows = []
for key, value in self.project.info().iteritems():
rows.append([key + ':', value])
table = AsciiTable(rows)
table.outer_border = False
table.inner_column_border = False
table.inner_heading_row_border = False
table.title = 'Dork status information'
print table.table
class Pluggable(object):
def set_plugins(self, plugins):
self.__plugins = plugins
@property
def plugins(self):
return self.__plugins
class DorkService(Service, Pluggable):
@classmethod
def from_service(cls, service, plugins=()):
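        # Rebinding __class__ swaps the instance's type in place, so the
        # already-built compose Service gains the Dork hooks without copying
        # any state.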
service.__class__ = cls
service.set_plugins(plugins)
return service
def build(self, no_cache=False, pull=False, force_rm=False, build_args_override=None):
for plugin in self.plugins:
plugin.building(self, no_cache, pull, force_rm)
result = super(DorkService, self).build(no_cache, pull, force_rm, build_args_override)
for plugin in self.plugins:
plugin.after_build(self, no_cache, pull, force_rm)
return result
def start_container(self, container):
for plugin in self.plugins:
plugin.starting_container(container)
return super(DorkService, self).start_container(container)
def create_container(self, one_off=False, previous_container=None,
number=None, quiet=False, **override_options):
for plugin in self.plugins:
plugin.creating_container(self)
return super(DorkService, self).create_container(one_off,
previous_container,
number, quiet,
**override_options)
class DorkNetworks(ProjectNetworks, Pluggable):
def initialize(self):
super(DorkNetworks, self).initialize()
for key, network in self.networks.iteritems():
for plugin in self.plugins:
plugin.attach_auxiliary_project(network.full_name)
def remove(self):
for key, network in self.networks.iteritems():
for plugin in self.plugins:
plugin.detach_auxiliary_project(network.full_name)
super(DorkNetworks, self).remove()
class DorkProject(Project, Pluggable):
@classmethod
def from_project(cls, project, plugins=()):
project.__class__ = cls
project.set_plugins(plugins)
project.networks.__class__ = DorkNetworks
project.networks.set_plugins(plugins)
return project
@classmethod
def from_config(cls, name, config_data, client, plugins=()):
project = super(DorkProject, cls).from_config(name, config_data, client)
project.set_plugins(plugins)
project.networks.set_plugins(plugins)
return project
def get_service(self, name):
return DorkService.from_service(super(DorkProject, self).get_service(name), self.plugins)
def up(self,
service_names=None,
start_deps=True,
strategy=ConvergenceStrategy.changed,
do_build=BuildAction.none,
timeout=DEFAULT_TIMEOUT,
detached=False,
remove_orphans=False,
scale_override=None):
for plugin in self.plugins:
plugin.initializing(self, service_names)
        containers = super(DorkProject, self).up(
            service_names, start_deps, strategy, do_build, timeout,
            detached, remove_orphans, scale_override)
for plugin in self.plugins:
plugin.initialized(self, containers)
return containers
def down(self, remove_image_type, include_volumes, remove_orphans=False):
for plugin in self.plugins:
plugin.removing(self, include_volumes)
super(DorkProject, self).down(remove_image_type, include_volumes, remove_orphans)
for plugin in self.plugins:
plugin.removed(self, include_volumes)
def __snapshots(self):
snapshots = []
for plugin in self.plugins:
snapshots.extend(plugin.snapshot_ls())
return snapshots
def snapshot_save(self, names=()):
        # If the provided names list is empty, collect name suggestions
        # from the autosave plugins.
if not names:
names = filter(tru, [p.snapshot_autosave() for p in self.plugins])
# Invoke plugin save hooks with collected names.
self.stop()
for plugin in self.plugins:
plugin.snapshot_save(names, self.volumes.volumes)
self.start()
def snapshot_load(self, names=()):
        # If the names list is empty, collect the most appropriate snapshots
        # from the autoload plugins.
if not names:
names = filter(tru, [p.snapshot_autoload(self.__snapshots()) for p in self.plugins])
        # Iterate over the plugins from the right and stop as soon as one
        # successfully loads a snapshot.
self.stop()
for plugin in reversed(self.plugins):
loaded = plugin.snapshot_load(names, self.volumes.volumes)
if loaded:
log.info("Loaded snapshot %s through plugin %s." % (loaded, plugin.name))
break
self.start()
def snapshot_rm(self, names=()):
        # If the names list is empty, collect the snapshots to remove
        # from the autoclean plugins.
if not names:
names = filter(tru, [p.snapshot_autoclean(self.__snapshots()) for p in self.plugins])
for plugin in self.plugins:
for removed in plugin.snapshot_rm(names):
log.info("Removed snapshot %s through plugin %s." % (removed, plugin.name))
def snapshot_ls(self, snapshots=()):
for name in self.__snapshots():
if not snapshots or name in snapshots:
log.info(name)
def info(self):
info = {}
for plugin in self.plugins:
info.update(plugin.info(self))
return info
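# Illustrative sketch, not part of the original file: a minimal no-op plugin.
# The hook names below are assumptions inferred only from the calls made in
# the classes above; the real plugin base class and its full interface live
# elsewhere in dork_compose and may differ.
class ExampleNoopPlugin(object):
    name = 'example'

    def alter_config_schema(self, schema):
        pass

    def initializing(self, project, service_names):
        pass

    def initialized(self, project, containers):
        pass

    def info(self, project):
        # Contributes one key/value row to the `info` command output.
        return {'Example plugin': 'enabled'}

    def snapshot_ls(self):
        return []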
|
{
"content_hash": "8f572da66681750d7507da2bdae0427c",
"timestamp": "",
"source": "github",
"line_count": 329,
"max_line_length": 146,
"avg_line_length": 34.027355623100306,
"alnum_prop": 0.6418043769539973,
"repo_name": "iamdork/compose",
"id": "f8e9282c75bf1f1cd6036983f4116cabb6e158e0",
"size": "11195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dork_compose/injections.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "228"
},
{
"name": "Nginx",
"bytes": "710"
},
{
"name": "PHP",
"bytes": "51"
},
{
"name": "Python",
"bytes": "67396"
},
{
"name": "Shell",
"bytes": "14433"
}
],
"symlink_target": ""
}
|
"""Supporting definitions for the Python regression tests."""
if __name__ != 'test.test_support':
raise ImportError('test_support must be imported from the test package')
import contextlib
import errno
import functools
import gc
import socket
import sys
import os
import platform
import shutil
import warnings
import unittest
import importlib
import UserDict
import re
import time
import struct
import sysconfig
try:
import thread
except ImportError:
thread = None
__all__ = ["Error", "TestFailed", "ResourceDenied", "import_module",
"verbose", "use_resources", "max_memuse", "record_original_stdout",
"get_original_stdout", "unload", "unlink", "rmtree", "forget",
"is_resource_enabled", "requires", "find_unused_port", "bind_port",
"fcmp", "have_unicode", "is_jython", "TESTFN", "HOST", "FUZZ",
"SAVEDCWD", "temp_cwd", "findfile", "sortdict", "check_syntax_error",
"open_urlresource", "check_warnings", "check_py3k_warnings",
"CleanImport", "EnvironmentVarGuard", "captured_output",
"captured_stdout", "TransientResource", "transient_internet",
"run_with_locale", "set_memlimit", "bigmemtest", "bigaddrspacetest",
"BasicTestRunner", "run_unittest", "run_doctest", "threading_setup",
"threading_cleanup", "reap_children", "cpython_only",
"check_impl_detail", "get_attribute", "py3k_bytes",
"import_fresh_module", "threading_cleanup", "reap_children",
"strip_python_stderr", "IPV6_ENABLED"]
class Error(Exception):
"""Base class for regression test exceptions."""
class TestFailed(Error):
"""Test failed."""
class ResourceDenied(unittest.SkipTest):
"""Test skipped because it requested a disallowed resource.
This is raised when a test calls requires() for a resource that
has not been enabled. It is used to distinguish between expected
and unexpected skips.
"""
@contextlib.contextmanager
def _ignore_deprecated_imports(ignore=True):
"""Context manager to suppress package and module deprecation
warnings when importing them.
If ignore is False, this context manager has no effect."""
if ignore:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", ".+ (module|package)",
DeprecationWarning)
yield
else:
yield
def import_module(name, deprecated=False):
"""Import and return the module to be tested, raising SkipTest if
it is not available.
If deprecated is True, any module or package deprecation messages
will be suppressed."""
with _ignore_deprecated_imports(deprecated):
try:
return importlib.import_module(name)
except ImportError, msg:
raise unittest.SkipTest(str(msg))
def _save_and_remove_module(name, orig_modules):
"""Helper function to save and remove a module from sys.modules
Raise ImportError if the module can't be imported."""
# try to import the module and raise an error if it can't be imported
if name not in sys.modules:
__import__(name)
del sys.modules[name]
for modname in list(sys.modules):
if modname == name or modname.startswith(name + '.'):
orig_modules[modname] = sys.modules[modname]
del sys.modules[modname]
def _save_and_block_module(name, orig_modules):
"""Helper function to save and block a module in sys.modules
Return True if the module was in sys.modules, False otherwise."""
saved = True
try:
orig_modules[name] = sys.modules[name]
except KeyError:
saved = False
sys.modules[name] = None
return saved
def import_fresh_module(name, fresh=(), blocked=(), deprecated=False):
"""Imports and returns a module, deliberately bypassing the sys.modules cache
and importing a fresh copy of the module. Once the import is complete,
the sys.modules cache is restored to its original state.
Modules named in fresh are also imported anew if needed by the import.
If one of these modules can't be imported, None is returned.
Importing of modules named in blocked is prevented while the fresh import
takes place.
If deprecated is True, any module or package deprecation messages
will be suppressed."""
# NOTE: test_heapq, test_json, and test_warnings include extra sanity
# checks to make sure that this utility function is working as expected
with _ignore_deprecated_imports(deprecated):
# Keep track of modules saved for later restoration as well
# as those which just need a blocking entry removed
orig_modules = {}
names_to_remove = []
_save_and_remove_module(name, orig_modules)
try:
for fresh_name in fresh:
_save_and_remove_module(fresh_name, orig_modules)
for blocked_name in blocked:
if not _save_and_block_module(blocked_name, orig_modules):
names_to_remove.append(blocked_name)
fresh_module = importlib.import_module(name)
except ImportError:
fresh_module = None
finally:
for orig_name, module in orig_modules.items():
sys.modules[orig_name] = module
for name_to_remove in names_to_remove:
del sys.modules[name_to_remove]
return fresh_module
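# Illustrative sketch, not part of the original module: how the test suite
# typically uses import_fresh_module to compare the pure-Python and the
# C-accelerated variants of a module (see test_heapq for the real usage).
def _example_import_fresh_module():
    py_heapq = import_fresh_module('heapq', blocked=['_heapq'])
    c_heapq = import_fresh_module('heapq', fresh=['_heapq'])
    return py_heapq, c_heapq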
def get_attribute(obj, name):
"""Get an attribute, raising SkipTest if AttributeError is raised."""
try:
attribute = getattr(obj, name)
except AttributeError:
raise unittest.SkipTest("module %s has no attribute %s" % (
obj.__name__, name))
else:
return attribute
verbose = 1 # Flag set to 0 by regrtest.py
use_resources = None # Flag set to [] by regrtest.py
max_memuse = 0 # Disable bigmem tests (they will still be run with
# small sizes, to make sure they work.)
real_max_memuse = 0
# _original_stdout is meant to hold stdout at the time regrtest began.
# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
# The point is to have some flavor of stdout the user can actually see.
_original_stdout = None
def record_original_stdout(stdout):
global _original_stdout
_original_stdout = stdout
def get_original_stdout():
return _original_stdout or sys.stdout
def unload(name):
try:
del sys.modules[name]
except KeyError:
pass
if sys.platform.startswith("win"):
def _waitfor(func, pathname, waitall=False):
# Perform the operation
func(pathname)
# Now setup the wait loop
if waitall:
dirname = pathname
else:
dirname, name = os.path.split(pathname)
dirname = dirname or '.'
# Check for `pathname` to be removed from the filesystem.
# The exponential backoff of the timeout amounts to a total
# of ~1 second after which the deletion is probably an error
# anyway.
# Testing on a i7@4.3GHz shows that usually only 1 iteration is
# required when contention occurs.
timeout = 0.001
while timeout < 1.0:
# Note we are only testing for the existence of the file(s) in
# the contents of the directory regardless of any security or
# access rights. If we have made it this far, we have sufficient
# permissions to do that much using Python's equivalent of the
# Windows API FindFirstFile.
# Other Windows APIs can fail or give incorrect results when
# dealing with files that are pending deletion.
L = os.listdir(dirname)
if not (L if waitall else name in L):
return
# Increase the timeout and try again
time.sleep(timeout)
timeout *= 2
warnings.warn('tests may fail, delete still pending for ' + pathname,
RuntimeWarning, stacklevel=4)
def _unlink(filename):
_waitfor(os.unlink, filename)
def _rmdir(dirname):
_waitfor(os.rmdir, dirname)
def _rmtree(path):
def _rmtree_inner(path):
for name in os.listdir(path):
fullname = os.path.join(path, name)
if os.path.isdir(fullname):
_waitfor(_rmtree_inner, fullname, waitall=True)
os.rmdir(fullname)
else:
os.unlink(fullname)
_waitfor(_rmtree_inner, path, waitall=True)
_waitfor(os.rmdir, path)
else:
_unlink = os.unlink
_rmdir = os.rmdir
_rmtree = shutil.rmtree
def unlink(filename):
try:
_unlink(filename)
except OSError:
pass
def rmdir(dirname):
try:
_rmdir(dirname)
except OSError as error:
# The directory need not exist.
if error.errno != errno.ENOENT:
raise
def rmtree(path):
try:
_rmtree(path)
except OSError, e:
# Unix returns ENOENT, Windows returns ESRCH.
if e.errno not in (errno.ENOENT, errno.ESRCH):
raise
def forget(modname):
'''"Forget" a module was ever imported by removing it from sys.modules and
deleting any .pyc and .pyo files.'''
unload(modname)
for dirname in sys.path:
unlink(os.path.join(dirname, modname + os.extsep + 'pyc'))
# Deleting the .pyo file cannot be within the 'try' for the .pyc since
# the chance exists that there is no .pyc (and thus the 'try' statement
# is exited) but there is a .pyo file.
unlink(os.path.join(dirname, modname + os.extsep + 'pyo'))
# Check whether a gui is actually available
def _is_gui_available():
if hasattr(_is_gui_available, 'result'):
return _is_gui_available.result
reason = None
if sys.platform.startswith('win'):
# if Python is running as a service (such as the buildbot service),
# gui interaction may be disallowed
import ctypes
import ctypes.wintypes
UOI_FLAGS = 1
WSF_VISIBLE = 0x0001
class USEROBJECTFLAGS(ctypes.Structure):
_fields_ = [("fInherit", ctypes.wintypes.BOOL),
("fReserved", ctypes.wintypes.BOOL),
("dwFlags", ctypes.wintypes.DWORD)]
dll = ctypes.windll.user32
h = dll.GetProcessWindowStation()
if not h:
raise ctypes.WinError()
uof = USEROBJECTFLAGS()
needed = ctypes.wintypes.DWORD()
res = dll.GetUserObjectInformationW(h,
UOI_FLAGS,
ctypes.byref(uof),
ctypes.sizeof(uof),
ctypes.byref(needed))
if not res:
raise ctypes.WinError()
if not bool(uof.dwFlags & WSF_VISIBLE):
reason = "gui not available (WSF_VISIBLE flag not set)"
elif sys.platform == 'darwin':
# The Aqua Tk implementations on OS X can abort the process if
# being called in an environment where a window server connection
# cannot be made, for instance when invoked by a buildbot or ssh
# process not running under the same user id as the current console
# user. To avoid that, raise an exception if the window manager
# connection is not available.
from ctypes import cdll, c_int, pointer, Structure
from ctypes.util import find_library
app_services = cdll.LoadLibrary(find_library("ApplicationServices"))
if app_services.CGMainDisplayID() == 0:
reason = "gui tests cannot run without OS X window manager"
else:
class ProcessSerialNumber(Structure):
_fields_ = [("highLongOfPSN", c_int),
("lowLongOfPSN", c_int)]
psn = ProcessSerialNumber()
psn_p = pointer(psn)
if ( (app_services.GetCurrentProcess(psn_p) < 0) or
(app_services.SetFrontProcess(psn_p) < 0) ):
reason = "cannot run without OS X gui process"
# check on every platform whether tkinter can actually do anything
if not reason:
try:
from Tkinter import Tk
root = Tk()
root.update()
root.destroy()
except Exception as e:
err_string = str(e)
if len(err_string) > 50:
err_string = err_string[:50] + ' [...]'
reason = 'Tk unavailable due to {}: {}'.format(type(e).__name__,
err_string)
_is_gui_available.reason = reason
_is_gui_available.result = not reason
return _is_gui_available.result
def is_resource_enabled(resource):
"""Test whether a resource is enabled.
Known resources are set by regrtest.py. If not running under regrtest.py,
all resources are assumed enabled unless use_resources has been set.
"""
return use_resources is None or resource in use_resources
def requires(resource, msg=None):
"""Raise ResourceDenied if the specified resource is not available."""
if resource == 'gui' and not _is_gui_available():
raise ResourceDenied(_is_gui_available.reason)
if not is_resource_enabled(resource):
if msg is None:
msg = "Use of the `%s' resource not enabled" % resource
raise ResourceDenied(msg)
# Don't use "localhost", since resolving it uses the DNS under recent
# Windows versions (see issue #18792).
HOST = "127.0.0.1"
HOSTv6 = "::1"
def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
"""Returns an unused port that should be suitable for binding. This is
achieved by creating a temporary socket with the same family and type as
the 'sock' parameter (default is AF_INET, SOCK_STREAM), and binding it to
the specified host address (defaults to 0.0.0.0) with the port set to 0,
eliciting an unused ephemeral port from the OS. The temporary socket is
then closed and deleted, and the ephemeral port is returned.
Either this method or bind_port() should be used for any tests where a
server socket needs to be bound to a particular port for the duration of
the test. Which one to use depends on whether the calling code is creating
a python socket, or if an unused port needs to be provided in a constructor
or passed to an external program (i.e. the -accept argument to openssl's
s_server mode). Always prefer bind_port() over find_unused_port() where
possible. Hard coded ports should *NEVER* be used. As soon as a server
socket is bound to a hard coded port, the ability to run multiple instances
of the test simultaneously on the same host is compromised, which makes the
test a ticking time bomb in a buildbot environment. On Unix buildbots, this
may simply manifest as a failed test, which can be recovered from without
intervention in most cases, but on Windows, the entire python process can
completely and utterly wedge, requiring someone to log in to the buildbot
and manually kill the affected process.
(This is easy to reproduce on Windows, unfortunately, and can be traced to
the SO_REUSEADDR socket option having different semantics on Windows versus
Unix/Linux. On Unix, you can't have two AF_INET SOCK_STREAM sockets bind,
listen and then accept connections on identical host/ports. An EADDRINUSE
socket.error will be raised at some point (depending on the platform and
the order bind and listen were called on each socket).
However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE
will ever be raised when attempting to bind two identical host/ports. When
accept() is called on each socket, the second caller's process will steal
the port from the first caller, leaving them both in an awkwardly wedged
state where they'll no longer respond to any signals or graceful kills, and
must be forcibly killed via OpenProcess()/TerminateProcess().
The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option
instead of SO_REUSEADDR, which effectively affords the same semantics as
SO_REUSEADDR on Unix. Given the propensity of Unix developers in the Open
Source world compared to Windows ones, this is a common mistake. A quick
look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when
openssl.exe is called with the 's_server' option, for example. See
http://bugs.python.org/issue2550 for more info. The following site also
has a very thorough description about the implications of both REUSEADDR
and EXCLUSIVEADDRUSE on Windows:
http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx)
XXX: although this approach is a vast improvement on previous attempts to
elicit unused ports, it rests heavily on the assumption that the ephemeral
port returned to us by the OS won't immediately be dished back out to some
other process when we close and delete our temporary socket but before our
calling code has a chance to bind the returned port. We can deal with this
issue if/when we come across it."""
tempsock = socket.socket(family, socktype)
port = bind_port(tempsock)
tempsock.close()
del tempsock
return port
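# Illustrative sketch, not part of the original module: bind_port() is the
# preferred helper when the test owns the socket; find_unused_port() is only
# for ports that must be handed to an external program before binding.
def _example_port_helpers():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    port = bind_port(sock)               # socket is now bound to a free port
    sock.close()
    external_port = find_unused_port()   # e.g. for "openssl s_server -accept ..."
    return port, external_port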
def bind_port(sock, host=HOST):
"""Bind the socket to a free port and return the port number. Relies on
ephemeral ports in order to ensure we are using an unbound port. This is
important as many tests may be running simultaneously, especially in a
buildbot environment. This method raises an exception if the sock.family
is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
or SO_REUSEPORT set on it. Tests should *never* set these socket options
for TCP/IP sockets. The only case for setting these options is testing
multicasting via multiple UDP sockets.
Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
on Windows), it will be set on the socket. This will prevent anyone else
from bind()'ing to our host/port for the duration of the test.
"""
if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
if hasattr(socket, 'SO_REUSEADDR'):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
raise TestFailed("tests should never set the SO_REUSEADDR " \
"socket option on TCP/IP sockets!")
if hasattr(socket, 'SO_REUSEPORT'):
try:
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
raise TestFailed("tests should never set the SO_REUSEPORT " \
"socket option on TCP/IP sockets!")
except EnvironmentError:
# Python's socket module was compiled using modern headers
# thus defining SO_REUSEPORT but this process is running
# under an older kernel that does not support SO_REUSEPORT.
pass
if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
sock.bind((host, 0))
port = sock.getsockname()[1]
return port
def _is_ipv6_enabled():
"""Check whether IPv6 is enabled on this host."""
if socket.has_ipv6:
sock = None
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.bind((HOSTv6, 0))
return True
except socket.error:
pass
finally:
if sock:
sock.close()
return False
IPV6_ENABLED = _is_ipv6_enabled()
def system_must_validate_cert(f):
"""Skip the test on TLS certificate validation failures."""
@functools.wraps(f)
def dec(*args, **kwargs):
try:
f(*args, **kwargs)
except IOError as e:
if "CERTIFICATE_VERIFY_FAILED" in str(e):
raise unittest.SkipTest("system does not contain "
"necessary certificates")
raise
return dec
FUZZ = 1e-6
def fcmp(x, y): # fuzzy comparison function
if isinstance(x, float) or isinstance(y, float):
try:
fuzz = (abs(x) + abs(y)) * FUZZ
if abs(x-y) <= fuzz:
return 0
except:
pass
elif type(x) == type(y) and isinstance(x, (tuple, list)):
for i in range(min(len(x), len(y))):
outcome = fcmp(x[i], y[i])
if outcome != 0:
return outcome
return (len(x) > len(y)) - (len(x) < len(y))
return (x > y) - (x < y)
# A constant likely larger than the underlying OS pipe buffer size, to
# make writes blocking.
# Windows limit seems to be around 512 B, and many Unix kernels have a
# 64 KiB pipe buffer size or 16 * PAGE_SIZE: take a few megs to be sure.
# (see issue #17835 for a discussion of this number).
PIPE_MAX_SIZE = 4 * 1024 * 1024 + 1
# A constant likely larger than the underlying OS socket buffer size, to make
# writes blocking.
# The socket buffer sizes can usually be tuned system-wide (e.g. through sysctl
# on Linux), or on a per-socket basis (SO_SNDBUF/SO_RCVBUF). See issue #18643
# for a discussion of this number).
SOCK_MAX_SIZE = 16 * 1024 * 1024 + 1
is_jython = sys.platform.startswith('java')
try:
unicode
have_unicode = True
except NameError:
have_unicode = False
requires_unicode = unittest.skipUnless(have_unicode, 'no unicode support')
def u(s):
return unicode(s, 'unicode-escape')
# FS_NONASCII: non-ASCII Unicode character encodable by
# sys.getfilesystemencoding(), or None if there is no such character.
FS_NONASCII = None
if have_unicode:
for character in (
# First try printable and common characters to have a readable filename.
# For each character, the encoding list are just example of encodings able
# to encode the character (the list is not exhaustive).
# U+00E6 (Latin Small Letter Ae): cp1252, iso-8859-1
unichr(0x00E6),
# U+0130 (Latin Capital Letter I With Dot Above): cp1254, iso8859_3
unichr(0x0130),
# U+0141 (Latin Capital Letter L With Stroke): cp1250, cp1257
unichr(0x0141),
# U+03C6 (Greek Small Letter Phi): cp1253
unichr(0x03C6),
# U+041A (Cyrillic Capital Letter Ka): cp1251
unichr(0x041A),
# U+05D0 (Hebrew Letter Alef): Encodable to cp424
unichr(0x05D0),
# U+060C (Arabic Comma): cp864, cp1006, iso8859_6, mac_arabic
unichr(0x060C),
# U+062A (Arabic Letter Teh): cp720
unichr(0x062A),
# U+0E01 (Thai Character Ko Kai): cp874
unichr(0x0E01),
# Then try more "special" characters. "special" because they may be
# interpreted or displayed differently depending on the exact locale
# encoding and the font.
# U+00A0 (No-Break Space)
unichr(0x00A0),
# U+20AC (Euro Sign)
unichr(0x20AC),
):
try:
character.encode(sys.getfilesystemencoding())\
.decode(sys.getfilesystemencoding())
except UnicodeError:
pass
else:
FS_NONASCII = character
break
# Filename used for testing
if os.name == 'java':
# Jython disallows @ in module names
TESTFN = '$test'
elif os.name == 'riscos':
TESTFN = 'testfile'
else:
TESTFN = '@test'
# Unicode name only used if TEST_FN_ENCODING exists for the platform.
if have_unicode:
# Assuming sys.getfilesystemencoding()!=sys.getdefaultencoding()
# TESTFN_UNICODE is a filename that can be encoded using the
# file system encoding, but *not* with the default (ascii) encoding
if isinstance('', unicode):
# python -U
# XXX perhaps unicode() should accept Unicode strings?
TESTFN_UNICODE = "@test-\xe0\xf2"
else:
# 2 latin characters.
TESTFN_UNICODE = unicode("@test-\xe0\xf2", "latin-1")
TESTFN_ENCODING = sys.getfilesystemencoding()
# TESTFN_UNENCODABLE is a filename that should *not* be
# able to be encoded by *either* the default or filesystem encoding.
# This test really only makes sense on Windows NT platforms
# which have special Unicode support in posixmodule.
if (not hasattr(sys, "getwindowsversion") or
sys.getwindowsversion()[3] < 2): # 0=win32s or 1=9x/ME
TESTFN_UNENCODABLE = None
else:
# Japanese characters (I think - from bug 846133)
TESTFN_UNENCODABLE = eval('u"@test-\u5171\u6709\u3055\u308c\u308b"')
try:
# XXX - Note - should be using TESTFN_ENCODING here - but for
# Windows, "mbcs" currently always operates as if in
# errors=ignore' mode - hence we get '?' characters rather than
# the exception. 'Latin1' operates as we expect - ie, fails.
# See [ 850997 ] mbcs encoding ignores errors
TESTFN_UNENCODABLE.encode("Latin1")
except UnicodeEncodeError:
pass
else:
print \
'WARNING: The filename %r CAN be encoded by the filesystem. ' \
'Unicode filename tests may not be effective' \
% TESTFN_UNENCODABLE
# Disambiguate TESTFN for parallel testing, while letting it remain a valid
# module name.
TESTFN = "{}_{}_tmp".format(TESTFN, os.getpid())
# Save the initial cwd
SAVEDCWD = os.getcwd()
@contextlib.contextmanager
def temp_cwd(name='tempcwd', quiet=False):
"""
Context manager that creates a temporary directory and set it as CWD.
The new CWD is created in the current directory and it's named *name*.
If *quiet* is False (default) and it's not possible to create or change
the CWD, an error is raised. If it's True, only a warning is raised
and the original CWD is used.
"""
if have_unicode and isinstance(name, unicode):
try:
name = name.encode(sys.getfilesystemencoding() or 'ascii')
except UnicodeEncodeError:
if not quiet:
raise unittest.SkipTest('unable to encode the cwd name with '
'the filesystem encoding.')
saved_dir = os.getcwd()
is_temporary = False
try:
os.mkdir(name)
os.chdir(name)
is_temporary = True
except OSError:
if not quiet:
raise
warnings.warn('tests may fail, unable to change the CWD to ' + name,
RuntimeWarning, stacklevel=3)
try:
yield os.getcwd()
finally:
os.chdir(saved_dir)
if is_temporary:
rmtree(name)
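# Illustrative sketch, not part of the original module: run part of a test in
# a throwaway working directory that is removed again afterwards.
def _example_temp_cwd():
    with temp_cwd('scratchdir') as cwd:
        open(os.path.join(cwd, 'data.txt'), 'w').close()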
def findfile(file, here=None, subdir=None):
"""Try to find a file on sys.path and the working directory. If it is not
found the argument passed to the function is returned (this does not
necessarily signal failure; could still be the legitimate path)."""
import test
if os.path.isabs(file):
return file
if subdir is not None:
file = os.path.join(subdir, file)
path = sys.path
if here is None:
path = test.__path__ + path
else:
path = [os.path.dirname(here)] + path
for dn in path:
fn = os.path.join(dn, file)
if os.path.exists(fn): return fn
return file
def sortdict(dict):
"Like repr(dict), but in sorted order."
items = dict.items()
items.sort()
reprpairs = ["%r: %r" % pair for pair in items]
withcommas = ", ".join(reprpairs)
return "{%s}" % withcommas
def make_bad_fd():
"""
Create an invalid file descriptor by opening and closing a file and return
its fd.
"""
file = open(TESTFN, "wb")
try:
return file.fileno()
finally:
file.close()
unlink(TESTFN)
def check_syntax_error(testcase, statement):
testcase.assertRaises(SyntaxError, compile, statement,
'<test string>', 'exec')
def open_urlresource(url, check=None):
import urlparse, urllib2
    filename = urlparse.urlparse(url)[2].split('/')[-1]  # split on '/' since it is a URL path, not an OS path
fn = os.path.join(os.path.dirname(__file__), "data", filename)
def check_valid_file(fn):
f = open(fn)
if check is None:
return f
elif check(f):
f.seek(0)
return f
f.close()
if os.path.exists(fn):
f = check_valid_file(fn)
if f is not None:
return f
unlink(fn)
# Verify the requirement before downloading the file
requires('urlfetch')
print >> get_original_stdout(), '\tfetching %s ...' % url
f = urllib2.urlopen(url, timeout=15)
try:
with open(fn, "wb") as out:
s = f.read()
while s:
out.write(s)
s = f.read()
finally:
f.close()
f = check_valid_file(fn)
if f is not None:
return f
raise TestFailed('invalid resource "%s"' % fn)
class WarningsRecorder(object):
"""Convenience wrapper for the warnings list returned on
entry to the warnings.catch_warnings() context manager.
"""
def __init__(self, warnings_list):
self._warnings = warnings_list
self._last = 0
def __getattr__(self, attr):
if len(self._warnings) > self._last:
return getattr(self._warnings[-1], attr)
elif attr in warnings.WarningMessage._WARNING_DETAILS:
return None
raise AttributeError("%r has no attribute %r" % (self, attr))
@property
def warnings(self):
return self._warnings[self._last:]
def reset(self):
self._last = len(self._warnings)
def _filterwarnings(filters, quiet=False):
"""Catch the warnings, then check if all the expected
warnings have been raised and re-raise unexpected warnings.
If 'quiet' is True, only re-raise the unexpected warnings.
"""
# Clear the warning registry of the calling module
# in order to re-raise the warnings.
frame = sys._getframe(2)
registry = frame.f_globals.get('__warningregistry__')
if registry:
registry.clear()
with warnings.catch_warnings(record=True) as w:
# Set filter "always" to record all warnings. Because
# test_warnings swap the module, we need to look up in
# the sys.modules dictionary.
sys.modules['warnings'].simplefilter("always")
yield WarningsRecorder(w)
# Filter the recorded warnings
reraise = [warning.message for warning in w]
missing = []
for msg, cat in filters:
seen = False
for exc in reraise[:]:
message = str(exc)
# Filter out the matching messages
if (re.match(msg, message, re.I) and
issubclass(exc.__class__, cat)):
seen = True
reraise.remove(exc)
if not seen and not quiet:
# This filter caught nothing
missing.append((msg, cat.__name__))
if reraise:
raise AssertionError("unhandled warning %r" % reraise[0])
if missing:
raise AssertionError("filter (%r, %s) did not catch any warning" %
missing[0])
@contextlib.contextmanager
def check_warnings(*filters, **kwargs):
"""Context manager to silence warnings.
Accept 2-tuples as positional arguments:
("message regexp", WarningCategory)
Optional argument:
- if 'quiet' is True, it does not fail if a filter catches nothing
(default True without argument,
default False if some filters are defined)
Without argument, it defaults to:
check_warnings(("", Warning), quiet=True)
"""
quiet = kwargs.get('quiet')
if not filters:
filters = (("", Warning),)
# Preserve backward compatibility
if quiet is None:
quiet = True
return _filterwarnings(filters, quiet)
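# Illustrative sketch, not part of the original module: assert that a specific
# DeprecationWarning is emitted. With an explicit filter, quiet defaults to
# False, so the block fails if the expected warning is never raised.
def _example_check_warnings():
    with check_warnings(("spam is deprecated", DeprecationWarning)):
        warnings.warn("spam is deprecated", DeprecationWarning)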
@contextlib.contextmanager
def check_py3k_warnings(*filters, **kwargs):
"""Context manager to silence py3k warnings.
Accept 2-tuples as positional arguments:
("message regexp", WarningCategory)
Optional argument:
- if 'quiet' is True, it does not fail if a filter catches nothing
(default False)
Without argument, it defaults to:
check_py3k_warnings(("", DeprecationWarning), quiet=False)
"""
if sys.py3kwarning:
if not filters:
filters = (("", DeprecationWarning),)
else:
# It should not raise any py3k warning
filters = ()
return _filterwarnings(filters, kwargs.get('quiet'))
class CleanImport(object):
"""Context manager to force import to return a new module reference.
This is useful for testing module-level behaviours, such as
the emission of a DeprecationWarning on import.
Use like this:
with CleanImport("foo"):
importlib.import_module("foo") # new reference
"""
def __init__(self, *module_names):
self.original_modules = sys.modules.copy()
for module_name in module_names:
if module_name in sys.modules:
module = sys.modules[module_name]
# It is possible that module_name is just an alias for
# another module (e.g. stub for modules renamed in 3.x).
# In that case, we also need delete the real module to clear
# the import cache.
if module.__name__ != module_name:
del sys.modules[module.__name__]
del sys.modules[module_name]
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
sys.modules.update(self.original_modules)
class EnvironmentVarGuard(UserDict.DictMixin):
"""Class to help protect the environment variable properly. Can be used as
a context manager."""
def __init__(self):
self._environ = os.environ
self._changed = {}
def __getitem__(self, envvar):
return self._environ[envvar]
def __setitem__(self, envvar, value):
# Remember the initial value on the first access
if envvar not in self._changed:
self._changed[envvar] = self._environ.get(envvar)
self._environ[envvar] = value
def __delitem__(self, envvar):
# Remember the initial value on the first access
if envvar not in self._changed:
self._changed[envvar] = self._environ.get(envvar)
if envvar in self._environ:
del self._environ[envvar]
def keys(self):
return self._environ.keys()
def set(self, envvar, value):
self[envvar] = value
def unset(self, envvar):
del self[envvar]
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
for (k, v) in self._changed.items():
if v is None:
if k in self._environ:
del self._environ[k]
else:
self._environ[k] = v
os.environ = self._environ
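# Illustrative sketch, not part of the original module: temporarily override
# and remove environment variables; the previous values are restored on exit.
def _example_environment_var_guard():
    with EnvironmentVarGuard() as env:
        env.set('LANG', 'C')
        env.unset('PYTHONPATH')
        # code under test runs here with the modified environment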
class DirsOnSysPath(object):
"""Context manager to temporarily add directories to sys.path.
This makes a copy of sys.path, appends any directories given
as positional arguments, then reverts sys.path to the copied
settings when the context ends.
Note that *all* sys.path modifications in the body of the
context manager, including replacement of the object,
will be reverted at the end of the block.
"""
def __init__(self, *paths):
self.original_value = sys.path[:]
self.original_object = sys.path
sys.path.extend(paths)
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
sys.path = self.original_object
sys.path[:] = self.original_value
class TransientResource(object):
"""Raise ResourceDenied if an exception is raised while the context manager
is in effect that matches the specified exception and attributes."""
def __init__(self, exc, **kwargs):
self.exc = exc
self.attrs = kwargs
def __enter__(self):
return self
def __exit__(self, type_=None, value=None, traceback=None):
"""If type_ is a subclass of self.exc and value has attributes matching
self.attrs, raise ResourceDenied. Otherwise let the exception
propagate (if any)."""
if type_ is not None and issubclass(self.exc, type_):
for attr, attr_value in self.attrs.iteritems():
if not hasattr(value, attr):
break
if getattr(value, attr) != attr_value:
break
else:
raise ResourceDenied("an optional resource is not available")
@contextlib.contextmanager
def transient_internet(resource_name, timeout=30.0, errnos=()):
"""Return a context manager that raises ResourceDenied when various issues
with the Internet connection manifest themselves as exceptions."""
default_errnos = [
('ECONNREFUSED', 111),
('ECONNRESET', 104),
('EHOSTUNREACH', 113),
('ENETUNREACH', 101),
('ETIMEDOUT', 110),
]
default_gai_errnos = [
('EAI_AGAIN', -3),
('EAI_FAIL', -4),
('EAI_NONAME', -2),
('EAI_NODATA', -5),
# Windows defines EAI_NODATA as 11001 but idiotic getaddrinfo()
# implementation actually returns WSANO_DATA i.e. 11004.
('WSANO_DATA', 11004),
]
denied = ResourceDenied("Resource '%s' is not available" % resource_name)
captured_errnos = errnos
gai_errnos = []
if not captured_errnos:
captured_errnos = [getattr(errno, name, num)
for (name, num) in default_errnos]
gai_errnos = [getattr(socket, name, num)
for (name, num) in default_gai_errnos]
def filter_error(err):
n = getattr(err, 'errno', None)
if (isinstance(err, socket.timeout) or
(isinstance(err, socket.gaierror) and n in gai_errnos) or
n in captured_errnos):
if not verbose:
sys.stderr.write(denied.args[0] + "\n")
raise denied
old_timeout = socket.getdefaulttimeout()
try:
if timeout is not None:
socket.setdefaulttimeout(timeout)
yield
except IOError as err:
# urllib can wrap original socket errors multiple times (!), we must
# unwrap to get at the original error.
while True:
a = err.args
if len(a) >= 1 and isinstance(a[0], IOError):
err = a[0]
# The error can also be wrapped as args[1]:
# except socket.error as msg:
# raise IOError('socket error', msg).with_traceback(sys.exc_info()[2])
elif len(a) >= 2 and isinstance(a[1], IOError):
err = a[1]
else:
break
filter_error(err)
raise
# XXX should we catch generic exceptions and look for their
# __cause__ or __context__?
finally:
socket.setdefaulttimeout(old_timeout)
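# Illustrative sketch, not part of the original module: network trouble inside
# the block is reported as ResourceDenied instead of failing the test.
def _example_transient_internet():
    import urllib2
    with transient_internet('python.org'):
        urllib2.urlopen('http://www.python.org/', timeout=10).close()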
@contextlib.contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout and captured_stdin
that temporarily replaces the sys stream *stream_name* with a StringIO."""
import StringIO
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, StringIO.StringIO())
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as s:
print "hello"
self.assertEqual(s.getvalue(), "hello")
"""
return captured_output("stdout")
def captured_stderr():
return captured_output("stderr")
def captured_stdin():
return captured_output("stdin")
def gc_collect():
"""Force as many objects as possible to be collected.
In non-CPython implementations of Python, this is needed because timely
deallocation is not guaranteed by the garbage collector. (Even in CPython
this can be the case in case of reference cycles.) This means that __del__
methods may be called later than expected and weakrefs may remain alive for
longer than expected. This function tries its best to force all garbage
objects to disappear.
"""
gc.collect()
if is_jython:
time.sleep(0.1)
gc.collect()
gc.collect()
_header = '2P'
if hasattr(sys, "gettotalrefcount"):
_header = '2P' + _header
_vheader = _header + 'P'
def calcobjsize(fmt):
return struct.calcsize(_header + fmt + '0P')
def calcvobjsize(fmt):
return struct.calcsize(_vheader + fmt + '0P')
_TPFLAGS_HAVE_GC = 1<<14
_TPFLAGS_HEAPTYPE = 1<<9
def check_sizeof(test, o, size):
import _testcapi
result = sys.getsizeof(o)
# add GC header size
if ((type(o) == type) and (o.__flags__ & _TPFLAGS_HEAPTYPE) or\
((type(o) != type) and (type(o).__flags__ & _TPFLAGS_HAVE_GC))):
size += _testcapi.SIZEOF_PYGC_HEAD
msg = 'wrong size for %s: got %d, expected %d' \
% (type(o), result, size)
test.assertEqual(result, size, msg)
#=======================================================================
# Decorator for running a function in a different locale, correctly resetting
# it afterwards.
def run_with_locale(catstr, *locales):
def decorator(func):
def inner(*args, **kwds):
try:
import locale
category = getattr(locale, catstr)
orig_locale = locale.setlocale(category)
except AttributeError:
# if the test author gives us an invalid category string
raise
except:
# cannot retrieve original locale, so do nothing
locale = orig_locale = None
else:
for loc in locales:
try:
locale.setlocale(category, loc)
break
except:
pass
# now run the function, resetting the locale on exceptions
try:
return func(*args, **kwds)
finally:
if locale and orig_locale:
locale.setlocale(category, orig_locale)
inner.func_name = func.func_name
inner.__doc__ = func.__doc__
return inner
return decorator
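# Illustrative sketch, not part of the original module: the decorated function
# runs under the first of the listed locales that can be set, and the original
# locale is restored afterwards (even if an exception is raised).
def _example_run_with_locale():
    @run_with_locale('LC_NUMERIC', 'fr_FR.UTF-8', 'fr_FR', 'C')
    def decimal_point():
        import locale
        return locale.localeconv()['decimal_point']
    return decimal_point()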
#=======================================================================
# Big-memory-test support. Separate from 'resources' because memory use should be configurable.
# Some handy shorthands. Note that these are used for byte-limits as well
# as size-limits, in the various bigmem tests
_1M = 1024*1024
_1G = 1024 * _1M
_2G = 2 * _1G
_4G = 4 * _1G
MAX_Py_ssize_t = sys.maxsize
def set_memlimit(limit):
global max_memuse
global real_max_memuse
sizes = {
'k': 1024,
'm': _1M,
'g': _1G,
't': 1024*_1G,
}
m = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit,
re.IGNORECASE | re.VERBOSE)
if m is None:
raise ValueError('Invalid memory limit %r' % (limit,))
memlimit = int(float(m.group(1)) * sizes[m.group(3).lower()])
real_max_memuse = memlimit
if memlimit > MAX_Py_ssize_t:
memlimit = MAX_Py_ssize_t
if memlimit < _2G - 1:
raise ValueError('Memory limit %r too low to be useful' % (limit,))
max_memuse = memlimit
def bigmemtest(minsize, memuse, overhead=5*_1M):
"""Decorator for bigmem tests.
'minsize' is the minimum useful size for the test (in arbitrary,
test-interpreted units.) 'memuse' is the number of 'bytes per size' for
the test, or a good estimate of it. 'overhead' specifies fixed overhead,
independent of the testsize, and defaults to 5Mb.
The decorator tries to guess a good value for 'size' and passes it to
the decorated test function. If minsize * memuse is more than the
allowed memory use (as defined by max_memuse), the test is skipped.
Otherwise, minsize is adjusted upward to use up to max_memuse.
"""
def decorator(f):
def wrapper(self):
if not max_memuse:
# If max_memuse is 0 (the default),
# we still want to run the tests with size set to a few kb,
# to make sure they work. We still want to avoid using
# too much memory, though, but we do that noisily.
maxsize = 5147
self.assertFalse(maxsize * memuse + overhead > 20 * _1M)
else:
maxsize = int((max_memuse - overhead) / memuse)
if maxsize < minsize:
# Really ought to print 'test skipped' or something
if verbose:
sys.stderr.write("Skipping %s because of memory "
"constraint\n" % (f.__name__,))
return
# Try to keep some breathing room in memory use
maxsize = max(maxsize - 50 * _1M, minsize)
return f(self, maxsize)
wrapper.minsize = minsize
wrapper.memuse = memuse
wrapper.overhead = overhead
return wrapper
return decorator
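# Illustrative sketch, not part of the original module: 'memuse' is the
# estimated peak bytes consumed per unit of 'size'; the decorator computes a
# concrete size from max_memuse and passes it to the test method.
def _example_bigmemtest():
    class _StrConcatTest(unittest.TestCase):
        @bigmemtest(minsize=_2G, memuse=3)
        def test_concat(self, size):
            s = '.' * size
            self.assertEqual(len(s + s), 2 * size)
    return _StrConcatTest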
def precisionbigmemtest(size, memuse, overhead=5*_1M, dry_run=True):
def decorator(f):
def wrapper(self):
if not real_max_memuse:
maxsize = 5147
else:
maxsize = size
if ((real_max_memuse or not dry_run)
and real_max_memuse < maxsize * memuse):
if verbose:
sys.stderr.write("Skipping %s because of memory "
"constraint\n" % (f.__name__,))
return
return f(self, maxsize)
wrapper.size = size
wrapper.memuse = memuse
wrapper.overhead = overhead
return wrapper
return decorator
def bigaddrspacetest(f):
"""Decorator for tests that fill the address space."""
def wrapper(self):
if max_memuse < MAX_Py_ssize_t:
if verbose:
sys.stderr.write("Skipping %s because of memory "
"constraint\n" % (f.__name__,))
else:
return f(self)
return wrapper
#=======================================================================
# unittest integration.
class BasicTestRunner:
def run(self, test):
result = unittest.TestResult()
test(result)
return result
def _id(obj):
return obj
def requires_resource(resource):
if resource == 'gui' and not _is_gui_available():
return unittest.skip(_is_gui_available.reason)
if is_resource_enabled(resource):
return _id
else:
return unittest.skip("resource {0!r} is not enabled".format(resource))
def cpython_only(test):
"""
Decorator for tests only applicable on CPython.
"""
return impl_detail(cpython=True)(test)
def impl_detail(msg=None, **guards):
if check_impl_detail(**guards):
return _id
if msg is None:
guardnames, default = _parse_guards(guards)
if default:
msg = "implementation detail not available on {0}"
else:
msg = "implementation detail specific to {0}"
guardnames = sorted(guardnames.keys())
msg = msg.format(' or '.join(guardnames))
return unittest.skip(msg)
def _parse_guards(guards):
# Returns a tuple ({platform_name: run_me}, default_value)
if not guards:
return ({'cpython': True}, False)
is_true = guards.values()[0]
assert guards.values() == [is_true] * len(guards) # all True or all False
return (guards, not is_true)
# Use the following check to guard CPython's implementation-specific tests --
# or to run them only on the implementation(s) guarded by the arguments.
def check_impl_detail(**guards):
"""This function returns True or False depending on the host platform.
Examples:
if check_impl_detail(): # only on CPython (default)
if check_impl_detail(jython=True): # only on Jython
if check_impl_detail(cpython=False): # everywhere except on CPython
"""
guards, default = _parse_guards(guards)
return guards.get(platform.python_implementation().lower(), default)
# ----------------------------------
# PyPy extension: you can run::
# python ..../test_foo.py --pdb
# to get a pdb prompt in case of exceptions
ResultClass = unittest.TextTestRunner.resultclass
class TestResultWithPdb(ResultClass):
def addError(self, testcase, exc_info):
ResultClass.addError(self, testcase, exc_info)
if '--pdb' in sys.argv:
import pdb, traceback
traceback.print_tb(exc_info[2])
pdb.post_mortem(exc_info[2])
# ----------------------------------
def _run_suite(suite):
"""Run tests from a unittest.TestSuite-derived class."""
if verbose:
runner = unittest.TextTestRunner(sys.stdout, verbosity=2,
resultclass=TestResultWithPdb)
else:
runner = BasicTestRunner()
result = runner.run(suite)
if not result.wasSuccessful():
if len(result.errors) == 1 and not result.failures:
err = result.errors[0][1]
elif len(result.failures) == 1 and not result.errors:
err = result.failures[0][1]
else:
err = "multiple errors occurred"
if not verbose:
err += "; run in verbose mode for details"
raise TestFailed(err)
# ----------------------------------
# PyPy extension: you can run::
# python ..../test_foo.py --filter bar
# to run only the test cases whose name contains bar
def filter_maybe(suite):
try:
i = sys.argv.index('--filter')
filter = sys.argv[i+1]
except (ValueError, IndexError):
return suite
tests = []
for test in linearize_suite(suite):
if filter in test._testMethodName:
tests.append(test)
return unittest.TestSuite(tests)
def linearize_suite(suite_or_test):
try:
it = iter(suite_or_test)
except TypeError:
yield suite_or_test
return
for subsuite in it:
for item in linearize_suite(subsuite):
yield item
# ----------------------------------
def run_unittest(*classes):
"""Run tests from unittest.TestCase-derived classes."""
valid_types = (unittest.TestSuite, unittest.TestCase)
suite = unittest.TestSuite()
for cls in classes:
if isinstance(cls, str):
if cls in sys.modules:
suite.addTest(unittest.findTestCases(sys.modules[cls]))
else:
raise ValueError("str arguments must be keys in sys.modules")
elif isinstance(cls, valid_types):
suite.addTest(cls)
else:
suite.addTest(unittest.makeSuite(cls))
suite = filter_maybe(suite)
_run_suite(suite)
#=======================================================================
# Check for the presence of docstrings.
HAVE_DOCSTRINGS = (check_impl_detail(cpython=False) or
sys.platform == 'win32' or
sysconfig.get_config_var('WITH_DOC_STRINGS'))
requires_docstrings = unittest.skipUnless(HAVE_DOCSTRINGS,
"test requires docstrings")
#=======================================================================
# doctest driver.
def run_doctest(module, verbosity=None):
"""Run doctest on the given module. Return (#failures, #tests).
If optional argument verbosity is not specified (or is None), pass
test_support's belief about verbosity on to doctest. Else doctest's
usual behavior is used (it searches sys.argv for -v).
"""
import doctest
if verbosity is None:
verbosity = verbose
else:
verbosity = None
# Direct doctest output (normally just errors) to real stdout; doctest
# output shouldn't be compared by regrtest.
save_stdout = sys.stdout
sys.stdout = get_original_stdout()
try:
f, t = doctest.testmod(module, verbose=verbosity)
if f:
raise TestFailed("%d of %d doctests failed" % (f, t))
finally:
sys.stdout = save_stdout
if verbose:
print 'doctest (%s) ... %d tests with zero failures' % (module.__name__, t)
return f, t
#=======================================================================
# Threading support to prevent reporting refleaks when running regrtest.py -R
# NOTE: we use thread._count() rather than threading.enumerate() (or the
# moral equivalent thereof) because a threading.Thread object is still alive
# until its __bootstrap() method has returned, even after it has been
# unregistered from the threading module.
# thread._count(), on the other hand, only gets decremented *after* the
# __bootstrap() method has returned, which gives us reliable reference counts
# at the end of a test run.
def threading_setup():
if thread:
return thread._count(),
else:
return 1,
def threading_cleanup(nb_threads):
if not thread:
return
_MAX_COUNT = 10
for count in range(_MAX_COUNT):
n = thread._count()
if n == nb_threads:
break
time.sleep(0.1)
# XXX print a warning in case of failure?
def reap_threads(func):
"""Use this function when threads are being used. This will
ensure that the threads are cleaned up even when the test fails.
If threading is unavailable this function does nothing.
"""
if not thread:
return func
@functools.wraps(func)
def decorator(*args):
key = threading_setup()
try:
return func(*args)
finally:
threading_cleanup(*key)
return decorator
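# Illustrative sketch, not part of the original module: wrap a test that
# spawns threads so leftover threads are waited for even if the test fails.
def _example_reap_threads():
    @reap_threads
    def test_with_worker():
        import threading
        t = threading.Thread(target=lambda: None)
        t.start()
        t.join()
    return test_with_worker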
def reap_children():
"""Use this function at the end of test_main() whenever sub-processes
are started. This will help ensure that no extra children (zombies)
stick around to hog resources and create problems when looking
for refleaks.
"""
# Reap all our dead child processes so we don't leave zombies around.
# These hog resources and might be causing some of the buildbots to die.
if hasattr(os, 'waitpid'):
any_process = -1
while True:
try:
# This will raise an exception on Windows. That's ok.
pid, status = os.waitpid(any_process, os.WNOHANG)
if pid == 0:
break
except:
break
@contextlib.contextmanager
def swap_attr(obj, attr, new_val):
"""Temporary swap out an attribute with a new object.
Usage:
with swap_attr(obj, "attr", 5):
...
This will set obj.attr to 5 for the duration of the with: block,
restoring the old value at the end of the block. If `attr` doesn't
exist on `obj`, it will be created and then deleted at the end of the
block.
"""
if hasattr(obj, attr):
real_val = getattr(obj, attr)
setattr(obj, attr, new_val)
try:
yield
finally:
setattr(obj, attr, real_val)
else:
setattr(obj, attr, new_val)
try:
yield
finally:
delattr(obj, attr)
def py3k_bytes(b):
"""Emulate the py3k bytes() constructor.
NOTE: This is only a best effort function.
"""
try:
# memoryview?
return b.tobytes()
except AttributeError:
try:
# iterable of ints?
return b"".join(chr(x) for x in b)
except TypeError:
return bytes(b)
def args_from_interpreter_flags():
"""Return a list of command-line arguments reproducing the current
settings in sys.flags."""
import subprocess
return subprocess._args_from_interpreter_flags()
def strip_python_stderr(stderr):
"""Strip the stderr of a Python process from potential debug output
emitted by the interpreter.
This will typically be run on the result of the communicate() method
of a subprocess.Popen object.
"""
stderr = re.sub(br"\[\d+ refs\]\r?\n?$", b"", stderr).strip()
return stderr
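# Illustrative sketch, not part of the original module: typical use on the
# stderr of a child interpreter started via subprocess.
def _example_strip_python_stderr():
    import subprocess
    p = subprocess.Popen([sys.executable, '-c', 'pass'],
                         stderr=subprocess.PIPE)
    _, err = p.communicate()
    return strip_python_stderr(err)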
|
{
"content_hash": "887e0f2ea6794911a59de4c1c484d9b8",
"timestamp": "",
"source": "github",
"line_count": 1589,
"max_line_length": 95,
"avg_line_length": 35.667715544367525,
"alnum_prop": 0.6084409626649728,
"repo_name": "andela-earinde/bellatrix-py",
"id": "9c5467a58352048284438147bd47ec9bb1cc3f50",
"size": "56676",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/js/lib/lib/modules/test/test_support.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8833"
},
{
"name": "HTML",
"bytes": "2381"
},
{
"name": "JavaScript",
"bytes": "12775582"
},
{
"name": "Python",
"bytes": "15057969"
}
],
"symlink_target": ""
}
|
from ._bots_operations import BotsOperations
from ._channels_operations import ChannelsOperations
from ._direct_line_operations import DirectLineOperations
from ._operations import Operations
from ._bot_connection_operations import BotConnectionOperations
from ._host_settings_operations import HostSettingsOperations
from ._operation_results_operations import OperationResultsOperations
from ._private_endpoint_connections_operations import PrivateEndpointConnectionsOperations
from ._private_link_resources_operations import PrivateLinkResourcesOperations
__all__ = [
'BotsOperations',
'ChannelsOperations',
'DirectLineOperations',
'Operations',
'BotConnectionOperations',
'HostSettingsOperations',
'OperationResultsOperations',
'PrivateEndpointConnectionsOperations',
'PrivateLinkResourcesOperations',
]
|
{
"content_hash": "f28d8477a2bdd66aba6cfac2293546f1",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 90,
"avg_line_length": 40.19047619047619,
"alnum_prop": 0.8175355450236966,
"repo_name": "Azure/azure-sdk-for-python",
"id": "89604c7b3d5307f5498b3ef8b68ca3aa654272d1",
"size": "1312",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "sdk/botservice/azure-mgmt-botservice/azure/mgmt/botservice/aio/operations/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from seleniumbase import BaseCase
class ProxyTests(BaseCase):
def test_proxy(self):
self.open("https://ipinfo.io/")
ip_address = self.get_text('#ip-string span[class*="primary"] span')
print("\n\nMy IP Address = %s\n" % ip_address)
print("Displaying Host Info:")
text = self.get_text("#widget-scrollable-container").split("asn:")[0]
rows = text.split("\n")
data = []
for row in rows:
if row.strip() != "":
data.append(row.strip())
print("\n".join(data).replace('\n"', " "))
print("\nThe browser will close automatically in 7 seconds...")
self.sleep(7)
|
{
"content_hash": "e359891282ab03f6b57f8338c7f0a141",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 77,
"avg_line_length": 37.44444444444444,
"alnum_prop": 0.5593471810089021,
"repo_name": "mdmintz/SeleniumBase",
"id": "17f372ed92b5d03a63a47b213da0176091785169",
"size": "674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/proxy_test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "268"
},
{
"name": "HTML",
"bytes": "867"
},
{
"name": "JavaScript",
"bytes": "1623"
},
{
"name": "Python",
"bytes": "183487"
},
{
"name": "Shell",
"bytes": "9749"
}
],
"symlink_target": ""
}
|
import commands
def totalCoresUsed():
return commands.getoutput("nova absolute-limits |grep totalCoresUsed |awk '{print $4}'")
def totalInstancesUsed():
return commands.getoutput("nova absolute-limits |grep totalInstancesUsed |awk '{print $4}'")
def totalRAMUserInGB():
mem = commands.getoutput("nova absolute-limits |grep totalRAMUsed |awk '{print $4}'")
mem = float(mem)
mem = mem / 1024
mem = round(mem, 1)
return mem
def totalFloatingIpsUsed():
return commands.getoutput("nova absolute-limits |grep totalFloatingIpsUsed |awk '{print $4}'")
def totalSecurityGroupsUsed():
return commands.getoutput("nova absolute-limits |grep totalSecurityGroupsUsed |awk '{print $4}'")
def launchInstance():
return commands.getoutput("fastnovaboot -n test -i Debian-7.7-server-amd64 -f m1.tiny -s ANTTIDEVEL_Digile_to_TriPort")
|
{
"content_hash": "d0f64431985ca7dcd2c0694da9c80456",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 123,
"avg_line_length": 35.958333333333336,
"alnum_prop": 0.7253765932792584,
"repo_name": "forgeservicelab/testautomation.imagecreation",
"id": "7723b6a09f5933a53307538467c95c708d01c0ec",
"size": "863",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NovaClient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "863"
}
],
"symlink_target": ""
}
|
"""Fichier contenant le module primaire format."""
from abstraits.module import *
from primaires.format import commandes
from primaires.format.config import cfg_charte
from primaires.format.description_flottante import DescriptionFlottante
from primaires.format.editeurs.floatedit import EdtFloatedit
from primaires.format.message import Message
class Module(BaseModule):
"""Cette classe décrit le module primaire Format.
Ce module est particulièrement chargé du formatage,
notamment des messages à envoyer aux clients.
"""
def __init__(self, importeur):
"""Constructeur du module"""
BaseModule.__init__(self, importeur, "format", "primaire")
def config(self):
"""Configuration du module.
On crée le fichier de configuration afin de l'utiliser plus tard
pour la mise en forme.
"""
type(self.importeur).anaconf.get_config("charte_graph", \
"format/charte.cfg", "modele charte graphique", cfg_charte)
        # Add the hooks
importeur.hook.ajouter_hook("description:ajouter_variables",
"Hook appelé pour ajouter des variables aux descriptions")
BaseModule.config(self)
self.descriptions_flottantes = {}
def init(self):
"""Initialisation du module.
On récupère les descriptions flottantes.
"""
flottantes = self.importeur.supenr.charger_groupe(DescriptionFlottante)
for flottante in flottantes:
self.descriptions_flottantes[flottante.cle] = flottante
BaseModule.init(self)
def ajouter_commandes(self):
"""Ajout des commandes dans l'interpréteur"""
self.commandes = [
commandes.flottantes.CmdFlottantes(),
]
for cmd in self.commandes:
self.importeur.interpreteur.ajouter_commande(cmd)
        # Add the editors
self.importeur.interpreteur.ajouter_editeur(EdtFloatedit)
def formater(self, message):
"""Retourne le message formaté.
Voir : primaires.format.message
"""
nv_message = Message(message, \
type(self.importeur).anaconf.get_config("charte_graph"))
return nv_message
def creer_description_flottante(self, cle):
"""Crée une description flottante."""
if cle in self.descriptions_flottantes:
raise KeyError(cle)
flottante = DescriptionFlottante(cle)
self.descriptions_flottantes[cle] = flottante
return flottante
def supprimer_description_flottante(self, cle):
"""Supprime la description flottante indiquée."""
if cle not in self.descriptions_flottantes:
raise KeyError(cle)
flottante = self.descriptions_flottantes.pop(cle)
flottante.detruire()
|
{
"content_hash": "fba436b5c16ee6df464bd7a58d475727",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 80,
"avg_line_length": 31.584269662921347,
"alnum_prop": 0.6609747420846673,
"repo_name": "stormi/tsunami",
"id": "d4bcbd3f64f3fdbda47aebc38afa34df372063da",
"size": "4386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/format/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7188300"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
}
|
class NormalizeEmptyResultError(Exception):
    # Raised when normalize() produces an empty result.
pass
|
{
"content_hash": "8df6fc4aa3654a8efb0ecdf5769920cc",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 43,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.7872340425531915,
"repo_name": "DanCech/graphite-web",
"id": "1349153d0c5109e6cd67b7afa41a52e8b08ad117",
"size": "94",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "webapp/graphite/errors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "150191"
},
{
"name": "HTML",
"bytes": "21521"
},
{
"name": "JavaScript",
"bytes": "1690375"
},
{
"name": "Perl",
"bytes": "857"
},
{
"name": "Python",
"bytes": "1234658"
},
{
"name": "Ruby",
"bytes": "1950"
},
{
"name": "Shell",
"bytes": "1113"
}
],
"symlink_target": ""
}
|
from fabric.api import *
from fabric.colors import red,yellow,green
from fabric.contrib.console import confirm
from fabric.utils import abort,error
from pprint import pprint
import os
# SETTINGS
IMAGES = ["base","postgresql", "mysql", "http", "php", "wsgi"]
NODES = [
## PROD CONTAINERS
{ "id": 1, "name" : "prodpostgresql01" , "image":"qqch/postgresql:latest", "extra_port":"5432%(id)s:5432" },
{ "id": 2, "name" : "prodmysql01" , "image":"qqch/mysql:latest" , "extra_port":"3306%(id)s:3306" },
{ "id": 3, "name" : "prodjibaku01" , "image":"qqch/wsgi:latest" , "extra_port":"800%(id)s:80" },
{ "id": 4, "name" : "prodjeanglode01" , "image":"qqch/wsgi:latest" , "extra_port":"800%(id)s:80" },
{ "id": 5, "name" : "prodjeanglode02" , "image":"qqch/php:latest" , "extra_port":"800%(id)s:80" },
{ "id": 6, "name" : "prodwp01" , "image":"qqch/php:latest" , "extra_port":"800%(id)s:80" },
{ "id": 7, "name" : "prodtorzka01" , "image":"qqch/php:latest" , "extra_port":"800%(id)s:80" },
{ "id": 8, "name" : "prodnolann01" , "image":"qqch/http:latest" , "extra_port":"800%(id)s:80" },
{ "id": 9, "name" : "prodjibaku02" , "image":"qqch/php:latest" , "extra_port":"800%(id)s:80" },
## TEST CONTAINERS
# { "id": 9, "name" : "jouet01" , "image":"qqch/base:latest" , },
]
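# Private networks: each entry creates bridge br<id> connecting the listed containers.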
NETWORKS = [
{ "id": 1, "nodes": ("prodmysql01", "prodjeanglode02", "prodjeanglode01") },
{ "id": 2, "nodes": ("prodpostgresql01", "prodjibaku01")},
{ "id": 3, "nodes": ("prodmysql01", "prodwp01")},
{ "id": 4, "nodes": ("prodmysql01", "prodtorzka01")},
{ "id": 5, "nodes": ("prodmysql01", "prodjibaku01")},
{ "id": 6, "nodes": ("prodmysql01", "prodjibaku02")},
]
DOCKERFILES_ROOT = "/home/docker/dockerfiles"
CONTAINERS_DATAROOT = "/home/docker/volumes/"
DKCLEAN_CMD = "/home/docker/bin/dkclean.sh"
PIPEWORK_CMD = "sudo /home/docker/bin/pipework"
# TASKS
@task
def node(nodename,cmd=None):
"""
The node mini wrapper, values for cmd arg : boot,start,stop,destroy
    * fab node:{nodename},boot : do a 'docker.io run' for the container, run it only once
* fab node:{nodename},stop : stop all containers in the node
* fab node:{nodename},start : do a 'docker.io start' on each container
* fab node:{nodename},destroy : destroy all the node and clean docker's data (see dkclean)
"""
generic_msg = "\nAvailable commands are :\n * start\n * stop\n * destroy\n * boot"
if cmd == "start":
node_start(nodename)
puts(green("node start [OK]"))
elif cmd == "stop":
node_stop(nodename)
puts(green("node stop [OK]"))
elif cmd == "boot":
node_boot(nodename)
puts(green("node boot [OK]"))
elif cmd == "destroy":
if confirm("Are you sure to destroy ? Everything you need is backuped ?",default=False):
node_destroy(nodename)
puts(green("node destroy [OK]"))
else:
abort(yellow("node destroy [CANCELED]"))
    elif cmd is None:
        puts(generic_msg)
    else:
        error(red("command %s not found!" % cmd))
        puts(generic_msg)
@task
def cluster(cmd=None):
"""
The cluster mini wrapper, values for cmd arg : boot,start,stop,destroy
    * fab cluster:boot : do a 'docker.io run' on each container, run it only once
* fab cluster:stop : stop all containers in the cluster
* fab cluster:start : do a 'docker.io start' on each container
* fab cluster:destroy : destroy all the cluster and clean docker's data (see dkclean)
"""
generic_msg = "\nAvailable commands are :\n * start\n * stop\n * destroy\n * boot"
if cmd == "start":
cluster_start()
puts(green("cluster start [OK]"))
elif cmd == "stop":
cluster_stop()
puts(green("cluster stop [OK]"))
elif cmd == "boot":
cluster_boot()
puts(green("cluster boot [OK]"))
elif cmd == "destroy":
if confirm("Are you sure to destroy ? Everything you need is backuped ?",default=False):
cluster_destroy()
puts(green("cluster destroy [OK]"))
else:
abort(yellow("cluster destroy [CANCELED]"))
    elif cmd is None:
        puts(generic_msg)
    else:
        error(red("command %s not found!" % cmd))
        puts(generic_msg)
@task
def dockerui(cmd=None):
"""
The dockerui mini wrapper, values for cmd arg : boot,start,stop,destroy
    * fab dockerui:boot : do a 'docker.io run' for the container, run it only once
* fab dockerui:stop : stop all containers in the dockerui
* fab dockerui:start : do a 'docker.io start' on each container
* fab dockerui:destroy : destroy all the dockerui and clean docker's data (see dkclean)
"""
generic_msg = "\nAvailable commands are :\n * start\n * stop\n * destroy\n * boot"
if cmd == "start":
dockerui_start()
puts(green("dockerui start [OK]"))
elif cmd == "stop":
dockerui_stop()
puts(green("dockerui stop [OK]"))
elif cmd == "boot":
dockerui_boot()
puts(green("dockerui boot [OK]"))
elif cmd == "destroy":
if confirm("Are you sure to destroy ? Everything you need is backuped ?",default=False):
dockerui_destroy()
puts(green("dockerui destroy [OK]"))
else:
abort(yellow("dockerui destroy [CANCELED]"))
    elif cmd is None:
        puts(generic_msg)
    else:
        error(red("command %s not found!" % cmd))
        puts(generic_msg)
@task
def build(*images):
"""
    Build the given images. If no image is given, build every image in IMAGES.
    """
if len(images) <= 0:
images = IMAGES
for image in images:
dirname = "qqch-%s" % (image,)
options = {
"image":image,
"dpath" : os.path.join(DOCKERFILES_ROOT, dirname),
}
local("docker.io build -t qqch/%(image)s %(dpath)s" % options)
dkclean()
@task
def dkclean():
"""
Warn the user to run the dkclean
"""
warn(yellow("Please run : %s" % DKCLEAN_CMD))
def dockerui_boot():
local("docker.io run -h dockerui --name=dockerui -d -p 9000:9000 -v /var/run/docker.sock:/docker.sock crosbymichael/dockerui -e /docker.sock")
def dockerui_start():
local("docker.io start dockerui")
def dockerui_stop():
local("docker.io stop dockerui")
def dockerui_destroy():
local("docker.io kill dockerui")
dkclean()
def getports(defnode):
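    # Port convention: SSH is published on host port 220<id> and munin-node on
    # 490<id>; extra_port (if defined for the node) publishes one more port.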
options = {
"convention" : "--publish=220%(id)s:22 --publish=490%(id)s:4949" ,
"extra" : "",
}
    if "extra_port" in defnode:
options["extra"] = "--publish=%(extra_port)s" % defnode
tpl = "%(convention)s %(extra)s" % options
return tpl % defnode
def getvpath(defnode):
return os.path.join(CONTAINERS_DATAROOT, defnode.get("name"))
def getlinks(defnode):
    # join with spaces so multiple --link flags stay separate arguments
    return " ".join("--link=%s" % link for link in defnode.get("links", []))
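# Template for `docker.io run`; the %(...)s placeholders are filled from each
# node definition in NODES (name, vpath, ports, linking, image).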
CMD_BOOT = "docker.io run --hostname=%(name)s --name=%(name)s -v %(vpath)s:/data %(ports)s %(linking)s -d -t %(image)s /sbin/my_init"
def cluster_boot():
for defnode in NODES:
defnode["ports"] = getports(defnode)
defnode["vpath"] = getvpath(defnode)
defnode["linking"] = getlinks(defnode)
local("mkdir -p %(vpath)s" % defnode)
cmd = CMD_BOOT % defnode
local(cmd)
network_up()
def node_find(nodename):
"""
    Search a node by its name; return None if not found.
"""
    for node in NODES:
        if node.get("name") == nodename:
            return node
puts(red("%s not found in cluster definition" % nodename))
return None
def node_boot(nodename):
defnode = node_find(nodename)
if defnode:
defnode["ports"] = getports(defnode)
defnode["vpath"] = getvpath(defnode)
defnode["linking"] = getlinks(defnode)
local(CMD_BOOT % defnode)
network_up(nodename)
def cluster_exec(cmd):
for defnode in NODES :
local(cmd % defnode)
def node_exec(nodename,cmd):
defnode = node_find(nodename)
if defnode:
local(cmd % defnode)
CMD_STOP = "docker.io stop %(name)s"
def cluster_stop():
cluster_exec(CMD_STOP)
def node_stop(nodename):
node_exec(nodename, CMD_STOP)
CMD_DESTROY = "docker.io kill %(name)s"
def cluster_destroy():
cluster_exec(CMD_DESTROY)
dkclean()
def node_destroy(nodename):
node_exec(nodename, CMD_DESTROY)
dkclean()
CMD_START = "docker.io start %(name)s"
def cluster_start():
cluster_exec(CMD_START)
network_up()
def node_start(nodename):
node_exec(nodename, CMD_START)
network_up(nodename)
def conf_render(tpl):
for defnode in NODES:
puts(tpl % defnode)
@task
def conf(cmd=None):
"""
The conf mini wrapper, values for cmd arg : munin, upstreams, ufw
* fab conf:munin : generate the node tree for /etc/munin/munin.conf
* fab conf:upstreams : generate the upstreams definition for nginx
* fab conf:ufw : generate the ufw rules for making ssh available from the outside for every container
"""
generic_msg = "\nAvailable commands are :\n * munin\n * ssh"
if cmd == "munin":
conf_munin()
elif cmd == "upstreams":
conf_upstreams()
elif cmd == "ufw":
puts(generic_msg)
elif cmd == None:
puts(generic_msg)
else:
error(red("commande %s not found !" % cmd))
puts(generic_msg)
def conf_munin():
tpl = """
[docker;%(name)s]
address 172.17.42.1
port 490%(id)s
use_node_name yes
"""
conf_render(tpl)
def conf_upstreams():
tpl ="""
upstream %(name)s {
server 172.17.42.1:800%(id)s;
}
"""
conf_render(tpl)
def conf_ufw():
tpl = """
ufw allow out 220%(id)s
"""
conf_render(tpl)
@task
def network_up(onlyfornode=None):
"""
    Build private networks between containers
"""
for network in NETWORKS:
options = {
"pipework": PIPEWORK_CMD,
}
for idx, node in enumerate(network.get("nodes")):
options["node"] = node
options["ip"] = idx+1
options.update(network)
cmd = "%(pipework)s br%(id)s -i eth%(id)s %(node)s 192.168.%(id)s.%(ip)s/24" % options
if onlyfornode is not None:
if node == onlyfornode:
local(cmd)
else:
local(cmd)
@task
def ssh(name):
"""
Open a ssh session to qqch@{container_name}
"""
found = False
for node in NODES:
if node.get("name") == name:
local("ssh qqch@127.1 -p220%(id)s " % node)
found = True
if not found:
puts(red("%s not found in cluster definition" % name))
@task
def ls():
"""
List containers configuration
"""
for node in NODES:
puts(yellow("%s : " % node.get("name") ))
puts("------------------------")
pprint(node)
puts("------------------------\n\n")
### MYSQL PART
from tissu.tasks import *
import os, sys
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(current_dir)
def run_mysql(query, shell=False):
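    # Run `query` against the MySQL node defined in the tissu settings; with
    # shell=True an interactive mysql shell is opened instead.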
from tissu.conf import settings
node = node_find(settings.NODE_NAME)
options = {"query": query}
options.update(settings.DB)
if shell:
cmd = "mysql --user=%(user)s --host=%(host)s -p%(password)s" % options
else:
cmd = "mysql --user=%(user)s --host=%(host)s -p%(password)s -e \"%(query)s\"" % options
run(cmd)
@roles('mysql')
@task
def mysql_shell():
run_mysql("", shell=True)
@roles('mysql')
@task
def mysql_user_create(login, password,host="%"):
sql="CREATE USER '%s'@'%s' IDENTIFIED BY '%s'" % (login,host,password)
run_mysql(sql)
@roles('mysql')
@task
def mysql_user_delete(login):
drop = "DROP USER %s" % (login,)
run_mysql(drop)
@roles('mysql')
@task
def mysql_database_create(name):
sql="CREATE DATABASE %s" % (name,)
run_mysql(sql)
@roles('mysql')
@task
def mysql_database_delete(name):
sql ="DROP DATABASE %s" % (name,)
run_mysql(sql)
@roles('mysql')
@task
def mysql_privileges_add(login,database,host="%"):
"""
login, database, host=localhost
"""
sql = "GRANT ALL PRIVILEGES ON %s.* TO '%s'@'%s'" % (database,login,host)
run_mysql(sql)
@roles('mysql')
@task
def mysql_privileges_delete(login,database,host="%"):
sql ="REVOKE ALL PRIVILEGES ON %s.* FROM '%s'@'%s'" % (database,login,host)
run_mysql(sql)
@roles('mysql')
@task
def mysql_privileges_show(login,host="%"):
sql = "SHOW GRANTS FOR %s@%s" % (login,host)
run_mysql(sql)
@roles('mysql')
@task
def mysql_account_create(login,password):
"""
login password
"""
mysql_user_create(login,password)
mysql_database_create(login)
mysql_privileges_add(login,login)
|
{
"content_hash": "5c349f9fe1baaa37aa9a12f24f47d38e",
"timestamp": "",
"source": "github",
"line_count": 468,
"max_line_length": 146,
"avg_line_length": 28.188034188034187,
"alnum_prop": 0.5778502122498483,
"repo_name": "Quelquechose/dockertools",
"id": "2107fd086a35545156601f0030c9bccda8b97091",
"size": "13271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Shell",
"bytes": "7316"
}
],
"symlink_target": ""
}
|
from glanceclient.exc import *
|
{
"content_hash": "d95a46c68138ee94c6d298498d68db79",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 30,
"avg_line_length": 31,
"alnum_prop": 0.8064516129032258,
"repo_name": "mmasaki/python-glanceclient",
"id": "64cc01ee49463258d766148bb61df908296c36c4",
"size": "174",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "glanceclient/common/exceptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "576366"
},
{
"name": "Shell",
"bytes": "3174"
}
],
"symlink_target": ""
}
|
"""
Gauged
https://github.com/chriso/gauged (MIT Licensed)
Copyright 2014 (c) Chris O'Hara <cohara87@gmail.com>
"""
class Aggregate(object):
SUM = 'sum'
MIN = 'min'
MAX = 'max'
MEAN = 'mean'
STDDEV = 'stddev'
PERCENTILE = 'percentile'
MEDIAN = 'median'
COUNT = 'count'
ALL = set([SUM, MIN, MAX, MEAN, STDDEV, PERCENTILE, MEDIAN, COUNT])
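    # aggregates that can be combined associatively from partial results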
ASSOCIATIVE = set([SUM, MIN, MAX, COUNT])
|
{
"content_hash": "10bafdb266dac7c8bcf9ba6041b03ec4",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 71,
"avg_line_length": 20.19047619047619,
"alnum_prop": 0.6061320754716981,
"repo_name": "chriso/gauged",
"id": "8a9b685bb0cff705d58935885f5e8d13cdab850e",
"size": "424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gauged/aggregates.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "60061"
},
{
"name": "Makefile",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "190561"
}
],
"symlink_target": ""
}
|
import json
import os
import sys
import time
import docopt
import pkg_resources
import six
import dcoscli
from dcos import cmds, emitting, http, jsonitem, marathon, options, util
from dcos.errors import DCOSException
from dcoscli import tables
from dcoscli.subcommand import default_command_info, default_doc
from dcoscli.util import decorate_docopt_usage
logger = util.get_logger(__name__)
emitter = emitting.FlatEmitter()
def main(argv):
try:
return _main(argv)
except DCOSException as e:
emitter.publish(e)
return 1
@decorate_docopt_usage
def _main(argv):
args = docopt.docopt(
default_doc("marathon"),
argv=argv,
version='dcos-marathon version {}'.format(dcoscli.version))
return cmds.execute(_cmds(), args)
def _cmds():
"""
:returns: all the supported commands
:rtype: dcos.cmds.Command
"""
subcommand = MarathonSubcommand()
return [
cmds.Command(
hierarchy=['marathon', 'version', 'list'],
arg_keys=['<app-id>', '--max-count'],
function=subcommand.version_list),
cmds.Command(
hierarchy=['marathon', 'deployment', 'list'],
arg_keys=['<app-id>', '--json'],
function=subcommand.deployment_list),
cmds.Command(
hierarchy=['marathon', 'deployment', 'rollback'],
arg_keys=['<deployment-id>'],
function=subcommand.deployment_rollback),
cmds.Command(
hierarchy=['marathon', 'deployment', 'stop'],
arg_keys=['<deployment-id>'],
function=subcommand.deployment_stop),
cmds.Command(
hierarchy=['marathon', 'deployment', 'watch'],
arg_keys=['<deployment-id>', '--max-count', '--interval'],
function=subcommand.deployment_watch),
cmds.Command(
hierarchy=['marathon', 'task', 'list'],
arg_keys=['<app-id>', '--json'],
function=subcommand.task_list),
cmds.Command(
hierarchy=['marathon', 'task', 'stop'],
arg_keys=['<task-id>', '--wipe'],
function=subcommand.task_stop),
cmds.Command(
hierarchy=['marathon', 'task', 'kill'],
arg_keys=['<task-ids>', '--scale', '--wipe', '--json'],
function=subcommand.task_kill),
cmds.Command(
hierarchy=['marathon', 'task', 'show'],
arg_keys=['<task-id>'],
function=subcommand.task_show),
cmds.Command(
hierarchy=['marathon', 'app', 'add'],
arg_keys=['<app-resource>'],
function=subcommand.add),
cmds.Command(
hierarchy=['marathon', 'app', 'list'],
arg_keys=['--json'],
function=subcommand.list),
cmds.Command(
hierarchy=['marathon', 'app', 'remove'],
arg_keys=['<app-id>', '--force'],
function=subcommand.remove),
cmds.Command(
hierarchy=['marathon', 'app', 'show'],
arg_keys=['<app-id>', '--app-version'],
function=subcommand.show),
cmds.Command(
hierarchy=['marathon', 'app', 'start'],
arg_keys=['<app-id>', '<instances>', '--force'],
function=subcommand.start),
cmds.Command(
hierarchy=['marathon', 'app', 'stop'],
arg_keys=['<app-id>', '--force'],
function=subcommand.stop),
cmds.Command(
hierarchy=['marathon', 'app', 'update'],
arg_keys=['<app-id>', '<properties>', '--force'],
function=subcommand.update),
cmds.Command(
hierarchy=['marathon', 'app', 'restart'],
arg_keys=['<app-id>', '--force'],
function=subcommand.restart),
cmds.Command(
hierarchy=['marathon', 'app', 'kill'],
arg_keys=['<app-id>', '--scale', '--host'],
function=subcommand.kill),
cmds.Command(
hierarchy=['marathon', 'group', 'add'],
arg_keys=['<group-resource>'],
function=subcommand.group_add),
cmds.Command(
hierarchy=['marathon', 'group', 'list'],
arg_keys=['--json'],
function=subcommand.group_list),
cmds.Command(
hierarchy=['marathon', 'group', 'show'],
arg_keys=['<group-id>', '--group-version'],
function=subcommand.group_show),
cmds.Command(
hierarchy=['marathon', 'group', 'remove'],
arg_keys=['<group-id>', '--force'],
function=subcommand.group_remove),
cmds.Command(
hierarchy=['marathon', 'group', 'update'],
arg_keys=['<group-id>', '<properties>', '--force'],
function=subcommand.group_update),
cmds.Command(
hierarchy=['marathon', 'group', 'scale'],
arg_keys=['<group-id>', '<scale-factor>', '--force'],
function=subcommand.group_scale),
cmds.Command(
hierarchy=['marathon', 'pod', 'add'],
arg_keys=['<pod-resource>'],
function=subcommand.pod_add),
cmds.Command(
hierarchy=['marathon', 'pod', 'remove'],
arg_keys=['<pod-id>', '--force'],
function=subcommand.pod_remove),
cmds.Command(
hierarchy=['marathon', 'pod', 'list'],
arg_keys=['--json'],
function=subcommand.pod_list),
cmds.Command(
hierarchy=['marathon', 'pod', 'show'],
arg_keys=['<pod-id>'],
function=subcommand.pod_show),
cmds.Command(
hierarchy=['marathon', 'pod', 'update'],
arg_keys=['<pod-id>', '--force'],
function=subcommand.pod_update),
cmds.Command(
hierarchy=['marathon', 'pod', 'kill'],
arg_keys=['<pod-id>', '<instance-ids>'],
function=subcommand.pod_kill),
cmds.Command(
hierarchy=['marathon', 'debug', 'list'],
arg_keys=['--json'],
function=subcommand.debug_list),
cmds.Command(
hierarchy=['marathon', 'debug', 'summary'],
arg_keys=['<app-id>', '--json'],
function=subcommand.debug_summary),
cmds.Command(
hierarchy=['marathon', 'debug', 'details'],
arg_keys=['<app-id>', '--json'],
function=subcommand.debug_details),
cmds.Command(
hierarchy=['marathon', 'about'],
arg_keys=[],
function=subcommand.about),
cmds.Command(
hierarchy=['marathon'],
arg_keys=['--config-schema', '--info'],
function=_marathon)
]
def _marathon(config_schema, info):
"""
:param config_schema: Whether to output the config schema
:type config_schema: boolean
:param info: Whether to output a description of this subcommand
:type info: boolean
:returns: process return code
:rtype: int
"""
if config_schema:
schema = _cli_config_schema()
emitter.publish(schema)
elif info:
_info()
else:
doc = default_command_info("marathon")
emitter.publish(options.make_generic_usage_message(doc))
return 1
return 0
def _info():
"""
:returns: process return code
:rtype: int
"""
emitter.publish(default_command_info("marathon"))
return 0
class ResourceReader(object):
"""Encapsulates side-effecting methods for loading Marathon resources."""
@staticmethod
def get_resource(name):
"""
:param name: optional filename or http(s) url
for the application or group resource
:type name: str | None
:returns: resource
:rtype: dict
"""
if name is not None:
if os.path.isfile(name):
with util.open_file(name) as resource_file:
return util.load_json(resource_file)
else:
try:
http.silence_requests_warnings()
req = http.get(name)
if req.status_code == 200:
data = b''
for chunk in req.iter_content(1024):
data += chunk
return util.load_jsons(data.decode('utf-8'))
else:
raise Exception
except Exception:
logger.exception('Cannot read from resource %s', name)
raise DCOSException(
"Can't read from resource: {0}.\n"
"Please check that it exists.".format(name))
example = "E.g.: dcos marathon app add < app_resource.json"
ResourceReader._assert_no_tty(example)
return util.load_json(sys.stdin)
@staticmethod
def get_resource_from_properties(properties):
"""
:param properties: JSON items in the form key=value
:type properties: [str]
:returns: resource JSON
:rtype: dict
"""
if len(properties) == 0:
example =\
"E.g. dcos marathon app update your-app-id < app_update.json"
ResourceReader._assert_no_tty(example)
return util.load_jsons(sys.stdin.read())
resource_json = {}
for prop in properties:
key, value = jsonitem.parse_json_item(prop, None)
key = jsonitem.clean_value(key)
if key in resource_json:
raise DCOSException(
'Key {!r} was specified more than once'.format(key))
resource_json[key] = value
return resource_json
@staticmethod
def _assert_no_tty(command_example):
if sys.stdin.isatty():
# We don't support TTY right now.
# In the future we will start an editor
template = ("We currently don't support reading from the TTY. "
"Please specify an application JSON.\n{}")
raise DCOSException(template.format(command_example))
class MarathonSubcommand(object):
"""Defines a method for each operation of the `dcos marathon` subcommand.
:param resource_reader: provides input methods for Marathon resources
:type resource_reader: ResourceReader
:param create_marathon_client: a callable that returns an instance of
marathon.Client
:type create_marathon_client: collections.abc.Callable
"""
def __init__(self,
resource_reader=ResourceReader(),
create_marathon_client=marathon.create_client):
self._resource_reader = resource_reader
self._create_marathon_client = create_marathon_client
def about(self):
"""
:returns: process return code
:rtype: int
"""
client = self._create_marathon_client()
emitter.publish(client.get_about())
return 0
def add(self, app_resource):
"""
:param app_resource: optional filename for the application resource
:type app_resource: str
:returns: process return code
:rtype: int
"""
application_resource = self._resource_reader.get_resource(app_resource)
# Add application to marathon
client = self._create_marathon_client()
# Check that the application doesn't exist
app_id = util.normalize_marathon_id_path(application_resource['id'])
try:
client.get_app(app_id)
except DCOSException as e:
logger.exception(e)
else:
message = "Application '{}' already exists".format(app_id)
raise DCOSException(message)
deployment = client.add_app(application_resource)
emitter.publish('Created deployment {}'.format(deployment))
return 0
def list(self, json_):
"""
:param json_: output json if True
:type json_: bool
:returns: process return code
:rtype: int
"""
client = self._create_marathon_client()
apps = client.get_apps()
if json_:
emitter.publish(apps)
else:
deployments = client.get_deployments()
queued_apps = client.get_queued_apps()
_enhance_row_with_overdue_information(apps, queued_apps)
table = tables.app_table(apps, deployments)
output = six.text_type(table)
if output:
emitter.publish(output)
return 0
def group_list(self, json_):
"""
:param json_: output json if True
:type json_: bool
:rtype: int
:returns: process return code
"""
client = self._create_marathon_client()
groups = client.get_groups()
emitting.publish_table(emitter, groups, tables.group_table, json_)
return 0
def group_add(self, group_resource):
"""
:param group_resource: optional filename for the group resource
:type group_resource: str
:returns: process return code
:rtype: int
"""
group_resource = self._resource_reader.get_resource(group_resource)
client = self._create_marathon_client()
# Check that the group doesn't exist
group_id = util.normalize_marathon_id_path(group_resource['id'])
try:
client.get_group(group_id)
except DCOSException as e:
logger.exception(e)
else:
raise DCOSException("Group '{}' already exists".format(group_id))
deployment = client.create_group(group_resource)
emitter.publish('Created deployment {}'.format(deployment))
return 0
def remove(self, app_id, force):
"""
:param app_id: ID of the app to remove
:type app_id: str
:param force: Whether to override running deployments.
:type force: bool
:returns: process return code
:rtype: int
"""
client = self._create_marathon_client()
client.remove_app(app_id, force)
return 0
def group_remove(self, group_id, force):
"""
:param group_id: ID of the app to remove
:type group_id: str
:param force: Whether to override running deployments.
:type force: bool
:returns: process return code
:rtype: int
"""
client = self._create_marathon_client()
client.remove_group(group_id, force)
return 0
def show(self, app_id, version):
"""Show details of a Marathon application.
:param app_id: The id for the application
:type app_id: str
:param version: The version, either absolute (date-time) or relative
:type version: str
:returns: process return code
:rtype: int
"""
client = self._create_marathon_client()
if version is not None:
version = _calculate_version(client, app_id, version)
app = client.get_app(app_id, version=version)
emitter.publish(app)
return 0
def group_show(self, group_id, version=None):
"""Show details of a Marathon application.
:param group_id: The id for the application
:type group_id: str
:param version: The version, either absolute (date-time) or relative
:type version: str
:returns: process return code
:rtype: int
"""
client = self._create_marathon_client()
app = client.get_group(group_id, version=version)
emitter.publish(app)
return 0
def group_update(self, group_id, properties, force):
"""
:param group_id: the id of the group
:type group_id: str
:param properties: json items used to update group
:type properties: [str]
:param force: whether to override running deployments
:type force: bool
:returns: process return code
:rtype: int
"""
client = self._create_marathon_client()
# Ensure that the group exists
client.get_group(group_id)
resource = self._resource_reader.\
get_resource_from_properties(properties)
deployment = client.update_group(group_id, resource, force)
emitter.publish('Created deployment {}'.format(deployment))
return 0
def start(self, app_id, instances, force):
"""Start a Marathon application.
:param app_id: the id for the application
:type app_id: str
:param instances: the number of instances to start
:type instances: str
:param force: whether to override running deployments
:type force: bool
:returns: process return code
:rtype: int
"""
# Check that the application exists
client = self._create_marathon_client()
desc = client.get_app(app_id)
if desc['instances'] > 0:
emitter.publish(
'Application {!r} already started: {!r} instances.'.format(
app_id,
desc['instances']))
return 1
# Need to add the 'id' because it is required
app_json = {'id': app_id}
# Set instances to 1 if not specified
if instances is None:
instances = 1
else:
instances = util.parse_int(instances)
if instances <= 0:
emitter.publish(
'The number of instances must be positive: {!r}.'.format(
instances))
return 1
app_json['instances'] = instances
deployment = client.update_app(app_id, app_json, force)
emitter.publish('Created deployment {}'.format(deployment))
return 0
def stop(self, app_id, force):
"""Stop a Marathon application
:param app_id: the id of the application
:type app_id: str
:param force: whether to override running deployments
:type force: bool
:returns: process return code
:rtype: int
"""
# Check that the application exists
client = self._create_marathon_client()
desc = client.get_app(app_id)
if desc['instances'] <= 0:
emitter.publish(
'Application {!r} already stopped: {!r} instances.'.format(
app_id,
desc['instances']))
return 1
app_json = {'instances': 0}
deployment = client.update_app(app_id, app_json, force)
emitter.publish('Created deployment {}'.format(deployment))
def update(self, app_id, properties, force):
"""
:param app_id: the id of the application
:type app_id: str
:param properties: json items used to update resource
:type properties: [str]
:param force: whether to override running deployments
:type force: bool
:returns: process return code
:rtype: int
"""
client = self._create_marathon_client()
# Ensure that the application exists
client.get_app(app_id)
resource = self._resource_reader.\
get_resource_from_properties(properties)
deployment = client.update_app(app_id, resource, force)
emitter.publish('Created deployment {}'.format(deployment))
return 0
def group_scale(self, group_id, scale_factor, force):
"""
:param group_id: the id of the group
:type group_id: str
:param scale_factor: scale factor for application group
:type scale_factor: str
:param force: whether to override running deployments
:type force: bool
:returns: process return code
:rtype: int
"""
client = self._create_marathon_client()
scale_factor = util.parse_float(scale_factor)
deployment = client.scale_group(group_id, scale_factor, force)
emitter.publish('Created deployment {}'.format(deployment))
return 0
def restart(self, app_id, force):
"""
:param app_id: the id of the application
:type app_id: str
:param force: whether to override running deployments
:type force: bool
:returns: process return code
:rtype: int
"""
client = self._create_marathon_client()
desc = client.get_app(app_id)
if desc['instances'] <= 0:
app_id = util.normalize_marathon_id_path(app_id)
emitter.publish(
'Unable to perform rolling restart of application {!r} '
'because it has no running tasks'.format(
app_id,
desc['instances']))
return 1
payload = client.restart_app(app_id, force)
message = 'Created deployment {}'.format(payload['deploymentId'])
emitter.publish(message)
return 0
def kill(self, app_id, scale, host):
"""
:param app_id: the id of the application
:type app_id: str
:param scale: Scale the app down
:type scale: bool
:param host: Kill only those tasks running on host specified
:type host: str
:returns: process return code
:rtype: int
"""
client = self._create_marathon_client()
payload = client.kill_tasks(app_id, host=host, scale=scale)
        # If scale is provided, the API returns a "deploymentResult"
# https://github.com/mesosphere/marathon/blob/50366c8/src/main/scala/mesosphere/marathon/api/RestResource.scala#L34-L36
if scale:
emitter.publish("Started deployment: {}".format(payload))
else:
if 'tasks' in payload:
emitter.publish('Killed tasks: {}'.format(payload['tasks']))
if len(payload['tasks']) == 0:
return 1
else:
emitter.publish('Killed tasks: []')
return 1
return 0
def version_list(self, app_id, max_count):
"""
:param app_id: the id of the application
:type app_id: str
:param max_count: the maximum number of version to fetch and return
:type max_count: str
:returns: process return code
:rtype: int
"""
if max_count is not None:
max_count = util.parse_int(max_count)
client = self._create_marathon_client()
versions = client.get_app_versions(app_id, max_count)
emitter.publish(versions)
return 0
def deployment_list(self, app_id, json_):
"""
:param app_id: the application id
:type app_id: str
:param json_: output json if True
:type json_: bool
:returns: process return code
:rtype: int
"""
client = self._create_marathon_client()
deployments = client.get_deployments(app_id)
if not deployments and not json_:
msg = "There are no deployments"
if app_id:
msg += " for '{}'".format(app_id)
raise DCOSException(msg)
emitting.publish_table(emitter,
deployments,
tables.deployment_table,
json_)
return 0
def deployment_stop(self, deployment_id):
"""
:param deployment_id: the application id
:type deployment_id: str
:returns: process return code
:rtype: int
"""
client = self._create_marathon_client()
client.stop_deployment(deployment_id)
return 0
def deployment_rollback(self, deployment_id):
"""
:param deployment_id: the application id
:type deployment_id: str
:returns: process return code
:rtype: int
"""
client = self._create_marathon_client()
deployment = client.rollback_deployment(deployment_id)
emitter.publish(deployment)
return 0
def deployment_watch(self, deployment_id, max_count, interval):
"""
:param deployment_id: the application id
:type deployment_id: str
:param max_count: maximum number of polling calls
:type max_count: str
:param interval: wait interval in seconds between polling calls
:type interval: str
:returns: process return code
:rtype: int
"""
if max_count is not None:
max_count = util.parse_int(max_count)
interval = 1 if interval is None else util.parse_int(interval)
client = self._create_marathon_client()
count = 0
while max_count is None or count < max_count:
deployment = client.get_deployment(deployment_id)
if deployment is None:
return 0
if util.is_windows_platform():
os.system('cls')
else:
if 'TERM' in os.environ:
os.system('clear')
emitter.publish('Deployment update time: '
'{} \n'.format(time.strftime("%Y-%m-%d %H:%M:%S",
time.gmtime())))
emitter.publish(deployment)
time.sleep(interval)
count += 1
return 0
def task_list(self, app_id, json_):
"""
:param app_id: the id of the application
:type app_id: str
:param json_: output json if True
:type json_: bool
:returns: process return code
:rtype: int
"""
client = self._create_marathon_client()
tasks = client.get_tasks(app_id)
emitting.publish_table(emitter, tasks, tables.app_task_table, json_)
return 0
def task_stop(self, task_id, wipe):
"""Stop a Marathon task
:param task_id: the id of the task
:type task_id: str
:param wipe: whether to wipe persistent data and unreserve resources
:type wipe: bool
:returns: process return code
:rtype: int
"""
client = self._create_marathon_client()
task = client.stop_task(task_id, wipe)
if task is None:
raise DCOSException("Task '{}' does not exist".format(task_id))
emitter.publish(task)
return 0
def task_kill(self, task_ids, scale, wipe, json_):
"""Kill one or multiple Marathon tasks
:param task_ids: the id of the task
:type task_ids: [str]
:param scale: Scale the app down after killing the specified tasks
:type scale: bool
:param wipe: whether remove reservations and persistent volumes.
:type wipe: bool
:param json_: output JSON if true
:type json_: bool
:returns: process return code
:rtype: int
"""
client = self._create_marathon_client()
payload = client.kill_and_scale_tasks(task_ids, scale, wipe)
def print_deployment(deployment, json_):
if json_:
emitter.publish(deployment)
else:
emitter.publish('Created deployment: {}'.format(
deployment['deploymentId']))
def print_killed_tasks(payload, json_):
if json_:
emitter.publish(payload)
else:
emitter.publish('Killed tasks: {}'.format(
[task['id'] for task in payload['tasks']]))
if scale:
print_deployment(payload, json_)
else:
print_killed_tasks(payload, json_)
if len(payload['tasks']) == 0:
raise DCOSException(
'Failed to kill tasks. task-ids seems to be unknown')
return 0
def task_show(self, task_id):
"""
:param task_id: the task id
:type task_id: str
:returns: process return code
:rtype: int
"""
client = self._create_marathon_client()
task = client.get_task(task_id)
if task is None:
raise DCOSException("Task '{}' does not exist".format(task_id))
emitter.publish(task)
return 0
def pod_add(self, pod_resource_path):
"""
:param pod_resource_path: optional file path for the pod resource
:type pod_resource_path: str
:returns: process return code
:rtype: int
"""
marathon_client = self._create_marathon_client()
self._ensure_pods_support(marathon_client)
pod_json = self._resource_reader.get_resource(pod_resource_path)
deployment = marathon_client.add_pod(pod_json)
emitter.publish('Created deployment {}'.format(deployment))
return 0
def pod_remove(self, pod_id, force):
"""
:param pod_id: the Marathon ID of the pod to remove
:type pod_id: str
:param force: whether to override running deployments
:type force: bool
:returns: process return code
:rtype: int
"""
marathon_client = self._create_marathon_client()
self._ensure_pods_support(marathon_client)
marathon_client.remove_pod(pod_id, force)
return 0
def pod_list(self, json_):
"""
:param json_: output JSON if true
:type json_: bool
:returns: process return code
:rtype: int
"""
marathon_client = self._create_marathon_client()
self._ensure_pods_support(marathon_client)
pods = marathon_client.list_pod()
queued_apps = marathon_client.get_queued_apps()
_enhance_row_with_overdue_information(pods, queued_apps)
emitting.publish_table(emitter, pods, tables.pod_table, json_)
return 0
def pod_show(self, pod_id):
"""Show details of a Marathon pod.
:param pod_id: the Marathon ID of the pod to show
:type pod_id: str
:returns: process return code
:rtype: int
"""
marathon_client = self._create_marathon_client()
self._ensure_pods_support(marathon_client)
pod_json = marathon_client.show_pod(pod_id)
emitter.publish(pod_json)
return 0
def pod_update(self, pod_id, force):
"""
:param pod_id: the Marathon ID of the pod to update
:type pod_id: str
:param force: whether to override running deployments
:type force: bool
:returns: process return code
:rtype: int
"""
marathon_client = self._create_marathon_client()
self._ensure_pods_support(marathon_client)
# Ensure that the pod exists
marathon_client.show_pod(pod_id)
resource = self._resource_reader.get_resource(name=None)
deployment_id = marathon_client.update_pod(
pod_id, pod_json=resource, force=force)
emitter.publish('Created deployment {}'.format(deployment_id))
return 0
def pod_kill(self, pod_id, instance_ids):
"""
:param pod_id: the Marathon ID of the pod to kill instances from
:type pod_id: str
:param instance_ids: the instance IDs to kill
:type instance_ids: [str]
:returns: process return code
:rtype: int
"""
if not instance_ids:
raise DCOSException('Please provide at least one pod instance ID')
marathon_client = self._create_marathon_client()
self._ensure_pods_support(marathon_client)
marathon_client.kill_pod_instances(pod_id, instance_ids)
return 0
def debug_list(self, json_):
"""
:param json_: output json if True
:type json_: bool
:returns: process return code
:rtype: int
"""
client = self._create_marathon_client()
queued_apps = client.get_queued_apps()
emitting.publish_table(emitter, queued_apps,
tables.queued_apps_table, json_)
return 0
def debug_details(self, app_id, json_):
"""
:param app_id: the Marathon ID to display details
:type app_id: string
:param json_: output json if True
:type json_: bool
:returns: process return code
:rtype: int
"""
client = self._create_marathon_client()
queued_app = client.get_queued_app(app_id)
if queued_app:
emitting.publish_table(
emitter, queued_app,
tables.queued_app_details_table, json_)
else:
raise DCOSException("No apps found in Marathon queue")
return 0
def debug_summary(self, app_id, json_):
"""
:param app_id: the Marathon ID to display details
:type app_id: string
:param json_: output json if True
:type json_: bool
:returns: process return code
:rtype: int
"""
client = self._create_marathon_client()
queued_app = client.get_queued_app(app_id)
if queued_app:
if queued_app.get('processedOffersSummary'):
emitting.publish_table(
emitter, queued_app,
tables.queued_app_table, json_)
else:
msg = "This command is not supported on your cluster"
raise DCOSException(msg)
else:
raise DCOSException("No apps found in Marathon queue")
return 0
@staticmethod
def _ensure_pods_support(marathon_client):
"""Raises an exception if the given client is communicating with a
version of Marathon that doesn't support pods.
:param marathon_client: the Marathon client to check
:type marathon_client: dcos.marathon.Client
:rtype: None
"""
if not marathon_client.pod_feature_supported():
msg = 'This command is not supported by your version of Marathon'
raise DCOSException(msg)
def _enhance_row_with_overdue_information(rows, queued_apps):
"""Calculates if configured `backoff` duration for this
app or pod definition was exceeded. In that case this application
is marked as `overdue`.
If the app or pod inside this row should be
:param rows: list of rows
:type rows: []
:param queued_apps: list of
:type queued_apps: []
:returns: true if pod is overdue, false otherwise
:rtype: bool
"""
for row in rows:
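        # find the queue entry (if any) whose app/pod id matches this row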
queued_app = next(
(app for app in queued_apps
if row.get('id') == marathon.get_app_or_pod_id(app)),
None)
row['overdue'] = queued_app.get('delay', {}) \
.get('overdue', False) if queued_app else False
return rows
def _calculate_version(client, app_id, version):
"""
:param client: Marathon client
:type client: dcos.marathon.Client
:param app_id: The ID of the application
:type app_id: str
:param version: Relative or absolute version or None
:type version: str
:returns: The absolute version as an ISO8601 date-time
:rtype: str
"""
# First let's try to parse it as a negative integer
try:
value = util.parse_int(version)
except DCOSException:
logger.exception('Unable to parse version %s', version)
return version
else:
if value < 0:
value = -1 * value
# We have a negative value let's ask Marathon for the last
# abs(value)
versions = client.get_app_versions(app_id, value + 1)
if len(versions) <= value:
# We don't have enough versions. Return an error.
msg = "Application {!r} only has {!r} version(s)."
raise DCOSException(msg.format(app_id, len(versions), value))
else:
return versions[value]
else:
raise DCOSException(
'Relative versions must be negative: {}'.format(version))
def _cli_config_schema():
"""
:returns: schema for marathon cli config
:rtype: dict
"""
return json.loads(
pkg_resources.resource_string(
'dcos',
'data/config-schema/marathon.json').decode('utf-8'))
|
{
"content_hash": "4bfa69ec3bd23ca20d9491dfb689cd72",
"timestamp": "",
"source": "github",
"line_count": 1183,
"max_line_length": 127,
"avg_line_length": 30.772612003381234,
"alnum_prop": 0.5590869135259862,
"repo_name": "mesosphere/dcos-cli",
"id": "e64976c9e251d2d4b531a38de1a86046ae2b90e9",
"size": "36404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cli/dcoscli/marathon/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "33616"
},
{
"name": "Makefile",
"bytes": "282"
},
{
"name": "PowerShell",
"bytes": "11778"
},
{
"name": "Python",
"bytes": "423926"
},
{
"name": "Shell",
"bytes": "25973"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('soamgr', '0005_ordersoa_is_adviser_approval'),
]
operations = [
migrations.AlterField(
model_name='revieworderagreement',
name='soa_order',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='soa_order', to='soamgr.OrderSoa'),
),
]
|
{
"content_hash": "d9f8b3ece1ba30e84eafb9b3a622f81c",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 129,
"avg_line_length": 27.157894736842106,
"alnum_prop": 0.6531007751937985,
"repo_name": "nikkomidoy/project_soa",
"id": "17a91b6e5e3e642f2054999658f5a0021c833c18",
"size": "588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "soamgr/migrations/0006_auto_20160430_1448.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1775"
},
{
"name": "HTML",
"bytes": "57470"
},
{
"name": "JavaScript",
"bytes": "68379"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "63453"
},
{
"name": "Shell",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
from twilio.twiml.voice_response import Dial, VoiceResponse, Sim
response = VoiceResponse()
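# record from the moment the call starts ringing, then dial the SIM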
dial = Dial(record='record-from-ringing')
dial.sim('DE8caa2afb9d5279926619c458dc7098a8')
response.append(dial)
print(response)
|
{
"content_hash": "b3c8028e80a57ba9330e5c95934dcc2f",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 64,
"avg_line_length": 27.625,
"alnum_prop": 0.8054298642533937,
"repo_name": "TwilioDevEd/api-snippets",
"id": "72b6c20770005fd0e0ff25ce0011583dbe90c180",
"size": "221",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "twiml/voice/sim/sim-2/sim-2.6.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "637161"
},
{
"name": "C++",
"bytes": "24856"
},
{
"name": "Go",
"bytes": "7217"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "912474"
},
{
"name": "JavaScript",
"bytes": "512877"
},
{
"name": "M",
"bytes": "147"
},
{
"name": "Objective-C",
"bytes": "53325"
},
{
"name": "PHP",
"bytes": "517186"
},
{
"name": "Python",
"bytes": "442184"
},
{
"name": "Ruby",
"bytes": "438928"
},
{
"name": "Shell",
"bytes": "3854"
},
{
"name": "Swift",
"bytes": "42345"
},
{
"name": "TypeScript",
"bytes": "16767"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import numpy as np
from .. import backend as K
from .. import activations, initializations, regularizers
from ..engine import Layer, InputSpec
def time_distributed_dense(x, w, b=None, dropout=None,
input_dim=None, output_dim=None, timesteps=None):
'''Apply y.w + b for every temporal slice y of x.
'''
if not input_dim:
# won't work with TensorFlow
input_dim = K.shape(x)[2]
if not timesteps:
# won't work with TensorFlow
timesteps = K.shape(x)[1]
if not output_dim:
# won't work with TensorFlow
output_dim = K.shape(w)[1]
if dropout is not None and 0. < dropout < 1.:
# apply the same dropout pattern at every timestep
ones = K.ones_like(K.reshape(x[:, 0, :], (-1, input_dim)))
dropout_matrix = K.dropout(ones, dropout)
expanded_dropout_matrix = K.repeat(dropout_matrix, timesteps)
x = K.in_train_phase(x * expanded_dropout_matrix, x)
# collapse time dimension and batch dimension together
x = K.reshape(x, (-1, input_dim))
x = K.dot(x, w)
if b:
x = x + b
# reshape to 3D tensor
x = K.reshape(x, (-1, timesteps, output_dim))
return x
class Recurrent(Layer):
'''Abstract base class for recurrent layers.
Do not use in a model -- it's not a valid layer!
Use its children classes `LSTM`, `GRU` and `SimpleRNN` instead.
All recurrent layers (`LSTM`, `GRU`, `SimpleRNN`) also
follow the specifications of this class and accept
the keyword arguments listed below.
# Example
```python
# as the first layer in a Sequential model
model = Sequential()
model.add(LSTM(32, input_shape=(10, 64)))
# now model.output_shape == (None, 32)
# note: `None` is the batch dimension.
# the following is identical:
model = Sequential()
model.add(LSTM(32, input_dim=64, input_length=10))
        # for subsequent layers, no need to specify the input size:
model.add(LSTM(16))
```
# Arguments
weights: list of Numpy arrays to set as initial weights.
The list should have 3 elements, of shapes:
`[(input_dim, output_dim), (output_dim, output_dim), (output_dim,)]`.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
go_backwards: Boolean (default False).
If True, process the input sequence backwards.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False). If True, the network will be unrolled,
else a symbolic loop will be used. When using TensorFlow, the network
is always unrolled, so this argument does not do anything.
            Unrolling can speed up an RNN, although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
consume_less: one of "cpu", "mem", or "gpu" (LSTM/GRU only).
If set to "cpu", the RNN will use
an implementation that uses fewer, larger matrix products,
thus running faster on CPU but consuming more memory.
If set to "mem", the RNN will use more matrix products,
but smaller ones, thus running slower (may actually be faster on GPU)
while consuming less memory.
If set to "gpu" (LSTM/GRU only), the RNN will combine the input gate,
the forget gate and the output gate into a single matrix,
enabling more time-efficient parallelization on the GPU. Note: RNN
dropout must be shared for all gates, resulting in a slightly
reduced regularization.
input_dim: dimensionality of the input (integer).
This argument (or alternatively, the keyword argument `input_shape`)
is required when using this layer as the first layer in a model.
input_length: Length of input sequences, to be specified
when it is constant.
This argument is required if you are going to connect
`Flatten` then `Dense` layers upstream
(without it, the shape of the dense outputs cannot be computed).
Note that if the recurrent layer is not the first layer
in your model, you would need to specify the input length
at the level of the first layer
(e.g. via the `input_shape` argument)
# Input shape
3D tensor with shape `(nb_samples, timesteps, input_dim)`.
# Output shape
- if `return_sequences`: 3D tensor with shape
`(nb_samples, timesteps, output_dim)`.
- else, 2D tensor with shape `(nb_samples, output_dim)`.
# Masking
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
set to `True`.
# TensorFlow warning
For the time being, when using the TensorFlow backend,
the number of timesteps used must be specified in your model.
Make sure to pass an `input_length` int argument to your
recurrent layer (if it comes first in your model),
or to pass a complete `input_shape` argument to the first layer
in your model otherwise.
# Note on using statefulness in RNNs
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch.
This assumes a one-to-one mapping between
samples in different successive batches.
To enable statefulness:
- specify `stateful=True` in the layer constructor.
- specify a fixed batch size for your model, by passing
                a `batch_input_shape=(...)` argument to the first layer in your
                model (for a Sequential model), or a `batch_shape=(...)` argument
                to all the first `Input` layers in your model (for a functional
                model with one or more Input layers).
This is the expected shape of your inputs *including the batch size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
# Note on using dropout with TensorFlow
When using the TensorFlow backend, specify a fixed batch size for your model
        following the notes on using statefulness in RNNs above.
'''
def __init__(self, weights=None,
return_sequences=False, go_backwards=False, stateful=False,
unroll=False, consume_less='cpu',
input_dim=None, input_length=None, **kwargs):
self.return_sequences = return_sequences
self.initial_weights = weights
self.go_backwards = go_backwards
self.stateful = stateful
self.unroll = unroll
self.consume_less = consume_less
self.supports_masking = True
self.input_spec = [InputSpec(ndim=3)]
self.input_dim = input_dim
self.input_length = input_length
if self.input_dim:
kwargs['input_shape'] = (self.input_length, self.input_dim)
super(Recurrent, self).__init__(**kwargs)
def get_output_shape_for(self, input_shape):
if self.return_sequences:
return (input_shape[0], input_shape[1], self.output_dim)
else:
return (input_shape[0], self.output_dim)
def compute_mask(self, input, mask):
if self.return_sequences:
return mask
else:
return None
def step(self, x, states):
raise NotImplementedError
def get_constants(self, x):
return []
def get_initial_states(self, x):
# build an all-zero tensor of shape (samples, output_dim)
initial_state = K.zeros_like(x) # (samples, timesteps, input_dim)
initial_state = K.sum(initial_state, axis=(1, 2)) # (samples,)
initial_state = K.expand_dims(initial_state) # (samples, 1)
initial_state = K.tile(initial_state, [1, self.output_dim]) # (samples, output_dim)
initial_states = [initial_state for _ in range(len(self.states))]
return initial_states
def preprocess_input(self, x):
return x
def call(self, x, mask=None):
# input shape: (nb_samples, time (padded with zeros), input_dim)
# note that the .build() method of subclasses MUST define
# self.input_spec with a complete input shape.
input_shape = self.input_spec[0].shape
if K._BACKEND == 'tensorflow':
if not input_shape[1]:
raise Exception('When using TensorFlow, you should define '
'explicitly the number of timesteps of '
'your sequences.\n'
'If your first layer is an Embedding, '
'make sure to pass it an "input_length" '
'argument. Otherwise, make sure '
'the first layer has '
'an "input_shape" or "batch_input_shape" '
'argument, including the time axis. '
'Found input shape at layer ' + self.name +
': ' + str(input_shape))
if self.stateful:
initial_states = self.states
else:
initial_states = self.get_initial_states(x)
constants = self.get_constants(x)
preprocessed_input = self.preprocess_input(x)
last_output, outputs, states = K.rnn(self.step, preprocessed_input,
initial_states,
go_backwards=self.go_backwards,
mask=mask,
constants=constants,
unroll=self.unroll,
input_length=input_shape[1])
if self.stateful:
self.updates = []
for i in range(len(states)):
self.updates.append((self.states[i], states[i]))
if self.return_sequences:
return outputs
else:
return last_output
def get_config(self):
config = {'return_sequences': self.return_sequences,
'go_backwards': self.go_backwards,
'stateful': self.stateful,
'unroll': self.unroll,
'consume_less': self.consume_less}
if self.stateful:
config['batch_input_shape'] = self.input_spec[0].shape
else:
config['input_dim'] = self.input_dim
config['input_length'] = self.input_length
base_config = super(Recurrent, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class SimpleRNN(Recurrent):
'''Fully-connected RNN where the output is to be fed back to input.
# Arguments
output_dim: dimension of the internal projections and the final output.
init: weight initialization function.
Can be the name of an existing function (str),
or a Theano function (see: [initializations](../initializations.md)).
inner_init: initialization function of the inner cells.
activation: activation function.
Can be the name of an existing function (str),
or a Theano function (see: [activations](../activations.md)).
W_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the input weights matrices.
U_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the recurrent weights matrices.
b_regularizer: instance of [WeightRegularizer](../regularizers.md),
applied to the bias.
dropout_W: float between 0 and 1. Fraction of the input units to drop for input gates.
dropout_U: float between 0 and 1. Fraction of the input units to drop for recurrent connections.
# References
- [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
'''
def __init__(self, output_dim,
init='glorot_uniform', inner_init='orthogonal',
activation='tanh',
W_regularizer=None, U_regularizer=None, b_regularizer=None,
dropout_W=0., dropout_U=0., **kwargs):
self.output_dim = output_dim
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.activation = activations.get(activation)
self.W_regularizer = regularizers.get(W_regularizer)
self.U_regularizer = regularizers.get(U_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.dropout_W, self.dropout_U = dropout_W, dropout_U
if self.dropout_W or self.dropout_U:
self.uses_learning_phase = True
super(SimpleRNN, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
if self.stateful:
self.reset_states()
else:
# initial states: all-zero tensor of shape (output_dim)
self.states = [None]
input_dim = input_shape[2]
self.input_dim = input_dim
self.W = self.init((input_dim, self.output_dim),
name='{}_W'.format(self.name))
self.U = self.inner_init((self.output_dim, self.output_dim),
name='{}_U'.format(self.name))
self.b = K.zeros((self.output_dim,), name='{}_b'.format(self.name))
self.regularizers = []
if self.W_regularizer:
self.W_regularizer.set_param(self.W)
self.regularizers.append(self.W_regularizer)
if self.U_regularizer:
self.U_regularizer.set_param(self.U)
self.regularizers.append(self.U_regularizer)
if self.b_regularizer:
self.b_regularizer.set_param(self.b)
self.regularizers.append(self.b_regularizer)
self.trainable_weights = [self.W, self.U, self.b]
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
def reset_states(self):
assert self.stateful, 'Layer must be stateful.'
input_shape = self.input_spec[0].shape
if not input_shape[0]:
            raise Exception('If an RNN is stateful, a complete ' +
'input_shape must be provided (including batch size).')
if hasattr(self, 'states'):
K.set_value(self.states[0],
np.zeros((input_shape[0], self.output_dim)))
else:
self.states = [K.zeros((input_shape[0], self.output_dim))]
def preprocess_input(self, x):
if self.consume_less == 'cpu':
input_shape = self.input_spec[0].shape
input_dim = input_shape[2]
timesteps = input_shape[1]
return time_distributed_dense(x, self.W, self.b, self.dropout_W,
input_dim, self.output_dim,
timesteps)
else:
return x
def step(self, x, states):
prev_output = states[0]
B_U = states[1]
B_W = states[2]
if self.consume_less == 'cpu':
h = x
else:
h = K.dot(x * B_W, self.W) + self.b
output = self.activation(h + K.dot(prev_output * B_U, self.U))
return output, [output]
def get_constants(self, x):
constants = []
if 0 < self.dropout_U < 1:
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.concatenate([ones] * self.output_dim, 1)
B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
constants.append(B_U)
else:
constants.append(K.cast_to_floatx(1.))
if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
input_shape = self.input_spec[0].shape
input_dim = input_shape[-1]
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.concatenate([ones] * input_dim, 1)
B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
constants.append(B_W)
else:
constants.append(K.cast_to_floatx(1.))
return constants
def get_config(self):
config = {'output_dim': self.output_dim,
'init': self.init.__name__,
'inner_init': self.inner_init.__name__,
'activation': self.activation.__name__,
'W_regularizer': self.W_regularizer.get_config() if self.W_regularizer else None,
'U_regularizer': self.U_regularizer.get_config() if self.U_regularizer else None,
'b_regularizer': self.b_regularizer.get_config() if self.b_regularizer else None,
'dropout_W': self.dropout_W,
'dropout_U': self.dropout_U}
base_config = super(SimpleRNN, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
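# Illustrative sketch (not part of the original module): the SimpleRNN
# recurrence above for a single timestep in plain NumPy, assuming a tanh
# activation and dropout disabled (B_U = B_W = 1).  Names are hypothetical
# and only mirror SimpleRNN.step.
import numpy as np  # numpy is already a dependency of this module
def np_simple_rnn_step(x_t, h_prev, W, U, b):
    # h_t = tanh(x_t . W + b + h_{t-1} . U)
    return np.tanh(np.dot(x_t, W) + b + np.dot(h_prev, U))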
class GRU(Recurrent):
'''Gated Recurrent Unit - Cho et al. 2014.
# Arguments
output_dim: dimension of the internal projections and the final output.
init: weight initialization function.
Can be the name of an existing function (str),
or a Theano function (see: [initializations](../initializations.md)).
inner_init: initialization function of the inner cells.
activation: activation function.
Can be the name of an existing function (str),
or a Theano function (see: [activations](../activations.md)).
inner_activation: activation function for the inner cells.
W_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the input weights matrices.
U_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the recurrent weights matrices.
b_regularizer: instance of [WeightRegularizer](../regularizers.md),
applied to the bias.
dropout_W: float between 0 and 1. Fraction of the input units to drop for input gates.
        dropout_U: float between 0 and 1. Fraction of the units to drop for the recurrent connections.
# References
- [On the Properties of Neural Machine Translation: Encoder–Decoder Approaches](http://www.aclweb.org/anthology/W14-4012)
- [Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling](http://arxiv.org/pdf/1412.3555v1.pdf)
- [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
'''
def __init__(self, output_dim,
init='glorot_uniform', inner_init='orthogonal',
activation='tanh', inner_activation='hard_sigmoid',
W_regularizer=None, U_regularizer=None, b_regularizer=None,
dropout_W=0., dropout_U=0., **kwargs):
self.output_dim = output_dim
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.activation = activations.get(activation)
self.inner_activation = activations.get(inner_activation)
self.W_regularizer = regularizers.get(W_regularizer)
self.U_regularizer = regularizers.get(U_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.dropout_W, self.dropout_U = dropout_W, dropout_U
if self.dropout_W or self.dropout_U:
self.uses_learning_phase = True
super(GRU, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
self.input_dim = input_shape[2]
if self.stateful:
self.reset_states()
else:
# initial states: all-zero tensor of shape (output_dim)
self.states = [None]
if self.consume_less == 'gpu':
self.W = self.init((self.input_dim, 3 * self.output_dim),
name='{}_W'.format(self.name))
self.U = self.inner_init((self.output_dim, 3 * self.output_dim),
name='{}_U'.format(self.name))
self.b = K.variable(np.hstack((np.zeros(self.output_dim),
np.zeros(self.output_dim),
np.zeros(self.output_dim))),
name='{}_b'.format(self.name))
self.trainable_weights = [self.W, self.U, self.b]
else:
self.W_z = self.init((self.input_dim, self.output_dim),
name='{}_W_z'.format(self.name))
self.U_z = self.inner_init((self.output_dim, self.output_dim),
name='{}_U_z'.format(self.name))
self.b_z = K.zeros((self.output_dim,), name='{}_b_z'.format(self.name))
self.W_r = self.init((self.input_dim, self.output_dim),
name='{}_W_r'.format(self.name))
self.U_r = self.inner_init((self.output_dim, self.output_dim),
name='{}_U_r'.format(self.name))
self.b_r = K.zeros((self.output_dim,), name='{}_b_r'.format(self.name))
self.W_h = self.init((self.input_dim, self.output_dim),
name='{}_W_h'.format(self.name))
self.U_h = self.inner_init((self.output_dim, self.output_dim),
name='{}_U_h'.format(self.name))
self.b_h = K.zeros((self.output_dim,), name='{}_b_h'.format(self.name))
self.trainable_weights = [self.W_z, self.U_z, self.b_z,
self.W_r, self.U_r, self.b_r,
self.W_h, self.U_h, self.b_h]
self.W = K.concatenate([self.W_z, self.W_r, self.W_h])
self.U = K.concatenate([self.U_z, self.U_r, self.U_h])
self.b = K.concatenate([self.b_z, self.b_r, self.b_h])
self.regularizers = []
if self.W_regularizer:
self.W_regularizer.set_param(self.W)
self.regularizers.append(self.W_regularizer)
if self.U_regularizer:
self.U_regularizer.set_param(self.U)
self.regularizers.append(self.U_regularizer)
if self.b_regularizer:
self.b_regularizer.set_param(self.b)
self.regularizers.append(self.b_regularizer)
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
def reset_states(self):
assert self.stateful, 'Layer must be stateful.'
input_shape = self.input_spec[0].shape
if not input_shape[0]:
            raise Exception('If an RNN is stateful, a complete ' +
'input_shape must be provided (including batch size).')
if hasattr(self, 'states'):
K.set_value(self.states[0],
np.zeros((input_shape[0], self.output_dim)))
else:
self.states = [K.zeros((input_shape[0], self.output_dim))]
def preprocess_input(self, x):
if self.consume_less == 'cpu':
input_shape = self.input_spec[0].shape
input_dim = input_shape[2]
timesteps = input_shape[1]
x_z = time_distributed_dense(x, self.W_z, self.b_z, self.dropout_W,
input_dim, self.output_dim, timesteps)
x_r = time_distributed_dense(x, self.W_r, self.b_r, self.dropout_W,
input_dim, self.output_dim, timesteps)
x_h = time_distributed_dense(x, self.W_h, self.b_h, self.dropout_W,
input_dim, self.output_dim, timesteps)
return K.concatenate([x_z, x_r, x_h], axis=2)
else:
return x
def step(self, x, states):
h_tm1 = states[0] # previous memory
B_U = states[1] # dropout matrices for recurrent units
B_W = states[2]
if self.consume_less == 'gpu':
matrix_x = K.dot(x * B_W[0], self.W) + self.b
matrix_inner = K.dot(h_tm1 * B_U[0], self.U[:, :2 * self.output_dim])
x_z = matrix_x[:, :self.output_dim]
x_r = matrix_x[:, self.output_dim: 2 * self.output_dim]
inner_z = matrix_inner[:, :self.output_dim]
inner_r = matrix_inner[:, self.output_dim: 2 * self.output_dim]
z = self.inner_activation(x_z + inner_z)
r = self.inner_activation(x_r + inner_r)
x_h = matrix_x[:, 2 * self.output_dim:]
inner_h = K.dot(r * h_tm1 * B_U[0], self.U[:, 2 * self.output_dim:])
hh = self.activation(x_h + inner_h)
else:
if self.consume_less == 'cpu':
x_z = x[:, :self.output_dim]
x_r = x[:, self.output_dim: 2 * self.output_dim]
x_h = x[:, 2 * self.output_dim:]
elif self.consume_less == 'mem':
x_z = K.dot(x * B_W[0], self.W_z) + self.b_z
x_r = K.dot(x * B_W[1], self.W_r) + self.b_r
x_h = K.dot(x * B_W[2], self.W_h) + self.b_h
else:
raise Exception('Unknown `consume_less` mode.')
z = self.inner_activation(x_z + K.dot(h_tm1 * B_U[0], self.U_z))
r = self.inner_activation(x_r + K.dot(h_tm1 * B_U[1], self.U_r))
hh = self.activation(x_h + K.dot(r * h_tm1 * B_U[2], self.U_h))
h = z * h_tm1 + (1 - z) * hh
return h, [h]
def get_constants(self, x):
constants = []
if 0 < self.dropout_U < 1:
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.concatenate([ones] * self.output_dim, 1)
B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
constants.append(B_U)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(3)])
if 0 < self.dropout_W < 1:
input_shape = self.input_spec[0].shape
input_dim = input_shape[-1]
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.concatenate([ones] * input_dim, 1)
B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
constants.append(B_W)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(3)])
return constants
def get_config(self):
config = {'output_dim': self.output_dim,
'init': self.init.__name__,
'inner_init': self.inner_init.__name__,
'activation': self.activation.__name__,
'inner_activation': self.inner_activation.__name__,
'W_regularizer': self.W_regularizer.get_config() if self.W_regularizer else None,
'U_regularizer': self.U_regularizer.get_config() if self.U_regularizer else None,
'b_regularizer': self.b_regularizer.get_config() if self.b_regularizer else None,
'dropout_W': self.dropout_W,
'dropout_U': self.dropout_U}
base_config = super(GRU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
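# Illustrative sketch (not part of the original module): one GRU step as in
# GRU.step above, in plain NumPy, assuming a logistic sigmoid for the inner
# activation (the layer defaults to hard_sigmoid), tanh for the outer
# activation and no dropout.  Names are hypothetical.
import numpy as np  # numpy is already a dependency of this module
def np_gru_step(x_t, h_prev, W_z, U_z, b_z, W_r, U_r, b_r, W_h, U_h, b_h):
    def sigmoid(a):
        return 1.0 / (1.0 + np.exp(-a))
    z = sigmoid(np.dot(x_t, W_z) + b_z + np.dot(h_prev, U_z))       # update gate
    r = sigmoid(np.dot(x_t, W_r) + b_r + np.dot(h_prev, U_r))       # reset gate
    hh = np.tanh(np.dot(x_t, W_h) + b_h + np.dot(r * h_prev, U_h))  # candidate
    return z * h_prev + (1 - z) * hh                                # new state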
class LSTM(Recurrent):
    '''Long Short-Term Memory unit - Hochreiter & Schmidhuber 1997.
For a step-by-step description of the algorithm, see
[this tutorial](http://deeplearning.net/tutorial/lstm.html).
# Arguments
output_dim: dimension of the internal projections and the final output.
init: weight initialization function.
Can be the name of an existing function (str),
or a Theano function (see: [initializations](../initializations.md)).
inner_init: initialization function of the inner cells.
forget_bias_init: initialization function for the bias of the forget gate.
[Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
recommend initializing with ones.
activation: activation function.
Can be the name of an existing function (str),
or a Theano function (see: [activations](../activations.md)).
inner_activation: activation function for the inner cells.
W_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the input weights matrices.
U_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the recurrent weights matrices.
b_regularizer: instance of [WeightRegularizer](../regularizers.md),
applied to the bias.
dropout_W: float between 0 and 1. Fraction of the input units to drop for input gates.
        dropout_U: float between 0 and 1. Fraction of the units to drop for the recurrent connections.
# References
- [Long short-term memory](http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf) (original 1997 paper)
- [Learning to forget: Continual prediction with LSTM](http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015)
- [Supervised sequence labelling with recurrent neural networks](http://www.cs.toronto.edu/~graves/preprint.pdf)
- [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
'''
def __init__(self, output_dim,
init='glorot_uniform', inner_init='orthogonal',
forget_bias_init='one', activation='tanh',
inner_activation='hard_sigmoid',
W_regularizer=None, U_regularizer=None, b_regularizer=None,
dropout_W=0., dropout_U=0., **kwargs):
self.output_dim = output_dim
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.forget_bias_init = initializations.get(forget_bias_init)
self.activation = activations.get(activation)
self.inner_activation = activations.get(inner_activation)
self.W_regularizer = regularizers.get(W_regularizer)
self.U_regularizer = regularizers.get(U_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.dropout_W, self.dropout_U = dropout_W, dropout_U
if self.dropout_W or self.dropout_U:
self.uses_learning_phase = True
super(LSTM, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
self.input_dim = input_shape[2]
if self.stateful:
self.reset_states()
else:
# initial states: 2 all-zero tensors of shape (output_dim)
self.states = [None, None]
if self.consume_less == 'gpu':
self.W = self.init((self.input_dim, 4 * self.output_dim),
name='{}_W'.format(self.name))
self.U = self.inner_init((self.output_dim, 4 * self.output_dim),
name='{}_U'.format(self.name))
self.b = K.variable(np.hstack((np.zeros(self.output_dim),
K.get_value(self.forget_bias_init((self.output_dim,))),
np.zeros(self.output_dim),
np.zeros(self.output_dim))),
name='{}_b'.format(self.name))
self.trainable_weights = [self.W, self.U, self.b]
else:
self.W_i = self.init((self.input_dim, self.output_dim),
name='{}_W_i'.format(self.name))
self.U_i = self.inner_init((self.output_dim, self.output_dim),
name='{}_U_i'.format(self.name))
self.b_i = K.zeros((self.output_dim,), name='{}_b_i'.format(self.name))
self.W_f = self.init((self.input_dim, self.output_dim),
name='{}_W_f'.format(self.name))
self.U_f = self.inner_init((self.output_dim, self.output_dim),
name='{}_U_f'.format(self.name))
self.b_f = self.forget_bias_init((self.output_dim,),
name='{}_b_f'.format(self.name))
self.W_c = self.init((self.input_dim, self.output_dim),
name='{}_W_c'.format(self.name))
self.U_c = self.inner_init((self.output_dim, self.output_dim),
name='{}_U_c'.format(self.name))
self.b_c = K.zeros((self.output_dim,), name='{}_b_c'.format(self.name))
self.W_o = self.init((self.input_dim, self.output_dim),
name='{}_W_o'.format(self.name))
self.U_o = self.inner_init((self.output_dim, self.output_dim),
name='{}_U_o'.format(self.name))
self.b_o = K.zeros((self.output_dim,), name='{}_b_o'.format(self.name))
self.trainable_weights = [self.W_i, self.U_i, self.b_i,
self.W_c, self.U_c, self.b_c,
self.W_f, self.U_f, self.b_f,
self.W_o, self.U_o, self.b_o]
self.W = K.concatenate([self.W_i, self.W_f, self.W_c, self.W_o])
self.U = K.concatenate([self.U_i, self.U_f, self.U_c, self.U_o])
self.b = K.concatenate([self.b_i, self.b_f, self.b_c, self.b_o])
self.regularizers = []
if self.W_regularizer:
self.W_regularizer.set_param(self.W)
self.regularizers.append(self.W_regularizer)
if self.U_regularizer:
self.U_regularizer.set_param(self.U)
self.regularizers.append(self.U_regularizer)
if self.b_regularizer:
self.b_regularizer.set_param(self.b)
self.regularizers.append(self.b_regularizer)
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
def reset_states(self):
assert self.stateful, 'Layer must be stateful.'
input_shape = self.input_spec[0].shape
if not input_shape[0]:
            raise Exception('If an RNN is stateful, a complete ' +
'input_shape must be provided (including batch size).')
if hasattr(self, 'states'):
K.set_value(self.states[0],
np.zeros((input_shape[0], self.output_dim)))
K.set_value(self.states[1],
np.zeros((input_shape[0], self.output_dim)))
else:
self.states = [K.zeros((input_shape[0], self.output_dim)),
K.zeros((input_shape[0], self.output_dim))]
def preprocess_input(self, x):
if self.consume_less == 'cpu':
if 0 < self.dropout_W < 1:
dropout = self.dropout_W
else:
dropout = 0
input_shape = self.input_spec[0].shape
input_dim = input_shape[2]
timesteps = input_shape[1]
x_i = time_distributed_dense(x, self.W_i, self.b_i, dropout,
input_dim, self.output_dim, timesteps)
x_f = time_distributed_dense(x, self.W_f, self.b_f, dropout,
input_dim, self.output_dim, timesteps)
x_c = time_distributed_dense(x, self.W_c, self.b_c, dropout,
input_dim, self.output_dim, timesteps)
x_o = time_distributed_dense(x, self.W_o, self.b_o, dropout,
input_dim, self.output_dim, timesteps)
return K.concatenate([x_i, x_f, x_c, x_o], axis=2)
else:
return x
def step(self, x, states):
h_tm1 = states[0]
c_tm1 = states[1]
B_U = states[2]
B_W = states[3]
if self.consume_less == 'gpu':
z = K.dot(x * B_W[0], self.W) + K.dot(h_tm1 * B_U[0], self.U) + self.b
z0 = z[:, :self.output_dim]
z1 = z[:, self.output_dim: 2 * self.output_dim]
z2 = z[:, 2 * self.output_dim: 3 * self.output_dim]
z3 = z[:, 3 * self.output_dim:]
i = self.inner_activation(z0)
f = self.inner_activation(z1)
c = f * c_tm1 + i * self.activation(z2)
o = self.inner_activation(z3)
else:
if self.consume_less == 'cpu':
x_i = x[:, :self.output_dim]
x_f = x[:, self.output_dim: 2 * self.output_dim]
x_c = x[:, 2 * self.output_dim: 3 * self.output_dim]
x_o = x[:, 3 * self.output_dim:]
elif self.consume_less == 'mem':
x_i = K.dot(x * B_W[0], self.W_i) + self.b_i
x_f = K.dot(x * B_W[1], self.W_f) + self.b_f
x_c = K.dot(x * B_W[2], self.W_c) + self.b_c
x_o = K.dot(x * B_W[3], self.W_o) + self.b_o
else:
raise Exception('Unknown `consume_less` mode.')
i = self.inner_activation(x_i + K.dot(h_tm1 * B_U[0], self.U_i))
f = self.inner_activation(x_f + K.dot(h_tm1 * B_U[1], self.U_f))
c = f * c_tm1 + i * self.activation(x_c + K.dot(h_tm1 * B_U[2], self.U_c))
o = self.inner_activation(x_o + K.dot(h_tm1 * B_U[3], self.U_o))
h = o * self.activation(c)
return h, [h, c]
def get_constants(self, x):
constants = []
if 0 < self.dropout_U < 1:
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.concatenate([ones] * self.output_dim, 1)
B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
constants.append(B_U)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(4)])
if 0 < self.dropout_W < 1:
input_shape = self.input_spec[0].shape
input_dim = input_shape[-1]
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.concatenate([ones] * input_dim, 1)
B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
constants.append(B_W)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(4)])
return constants
def get_config(self):
config = {'output_dim': self.output_dim,
'init': self.init.__name__,
'inner_init': self.inner_init.__name__,
'forget_bias_init': self.forget_bias_init.__name__,
'activation': self.activation.__name__,
'inner_activation': self.inner_activation.__name__,
'W_regularizer': self.W_regularizer.get_config() if self.W_regularizer else None,
'U_regularizer': self.U_regularizer.get_config() if self.U_regularizer else None,
'b_regularizer': self.b_regularizer.get_config() if self.b_regularizer else None,
'dropout_W': self.dropout_W,
'dropout_U': self.dropout_U}
base_config = super(LSTM, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
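# Illustrative sketch (not part of the original module): one LSTM step as in
# LSTM.step above, in plain NumPy, assuming a logistic sigmoid for the inner
# activation (the layer defaults to hard_sigmoid), tanh for the outer
# activation and no dropout.  Names are hypothetical.
import numpy as np  # numpy is already a dependency of this module
def np_lstm_step(x_t, h_prev, c_prev,
                 W_i, U_i, b_i, W_f, U_f, b_f,
                 W_c, U_c, b_c, W_o, U_o, b_o):
    def sigmoid(a):
        return 1.0 / (1.0 + np.exp(-a))
    i = sigmoid(np.dot(x_t, W_i) + b_i + np.dot(h_prev, U_i))  # input gate
    f = sigmoid(np.dot(x_t, W_f) + b_f + np.dot(h_prev, U_f))  # forget gate
    c = f * c_prev + i * np.tanh(np.dot(x_t, W_c) + b_c + np.dot(h_prev, U_c))
    o = sigmoid(np.dot(x_t, W_o) + b_o + np.dot(h_prev, U_o))  # output gate
    h = o * np.tanh(c)
    return h, c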
|
{
"content_hash": "eb79d11e68b2d00f3c83698b1cb6f3f5",
"timestamp": "",
"source": "github",
"line_count": 860,
"max_line_length": 130,
"avg_line_length": 46.98837209302326,
"alnum_prop": 0.5570403365503588,
"repo_name": "relh/keras",
"id": "0cd3c9d9212c4dbc87218749b215c47c96ba705a",
"size": "40436",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "keras/layers/recurrent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "697"
},
{
"name": "Python",
"bytes": "905485"
}
],
"symlink_target": ""
}
|
"""
Utilities for NetApp drivers.
This module contains common utilities to be used by one or more
NetApp drivers to achieve the desired functionality.
"""
import decimal
import platform
import socket
from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from oslo_utils import importutils
import six
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LW, _LI
from cinder import utils
from cinder import version
from cinder.volume import qos_specs
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
OPENSTACK_PREFIX = 'openstack-'
OBSOLETE_SSC_SPECS = {'netapp:raid_type': 'netapp_raid_type',
'netapp:disk_type': 'netapp_disk_type'}
DEPRECATED_SSC_SPECS = {'netapp_unmirrored': 'netapp_mirrored',
'netapp_nodedup': 'netapp_dedup',
'netapp_nocompression': 'netapp_compression',
'netapp_thick_provisioned': 'netapp_thin_provisioned'}
QOS_KEYS = frozenset(
['maxIOPS', 'total_iops_sec', 'maxBPS', 'total_bytes_sec'])
BACKEND_QOS_CONSUMERS = frozenset(['back-end', 'both'])
def validate_instantiation(**kwargs):
"""Checks if a driver is instantiated other than by the unified driver.
Helps check direct instantiation of netapp drivers.
Call this function in every netapp block driver constructor.
"""
if kwargs and kwargs.get('netapp_mode') == 'proxy':
return
LOG.warning(_LW("It is not the recommended way to use drivers by NetApp. "
"Please use NetAppDriver to achieve the functionality."))
def check_flags(required_flags, configuration):
"""Ensure that the flags we care about are set."""
for flag in required_flags:
if not getattr(configuration, flag, None):
msg = _('Configuration value %s is not set.') % flag
raise exception.InvalidInput(reason=msg)
def check_netapp_lib():
if not importutils.try_import('netapp_lib'):
msg = ('You have not installed the NetApp API Library for OpenStack. '
'Please install it using "sudo pip install netapp-lib" and '
'restart this service!')
raise exception.NetAppDriverException(msg)
def to_bool(val):
    """Converts true, y, yes, enabled, 1 (case-insensitive) to True; False otherwise."""
    if not val:
        return False
    return six.text_type(val).lower() in ('true', 'y', 'yes', 'enabled', '1')
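# Illustrative sketch (not part of the original module): expected behaviour of
# to_bool for a few representative inputs.  The helper name is hypothetical
# and is never called by driver code.
def _to_bool_examples():
    assert to_bool('Yes') is True
    assert to_bool('enabled') is True
    assert to_bool('1') is True
    assert to_bool('0') is False
    assert to_bool(None) is False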
@utils.synchronized("safe_set_attr")
def set_safe_attr(instance, attr, val):
"""Sets the attribute in a thread safe manner.
Returns if new val was set on attribute.
If attr already had the value then False.
"""
if not instance or not attr:
return False
old_val = getattr(instance, attr, None)
if val is None and old_val is None:
return False
elif val == old_val:
return False
else:
setattr(instance, attr, val)
return True
def get_volume_extra_specs(volume):
"""Provides extra specs associated with volume."""
ctxt = context.get_admin_context()
type_id = volume.get('volume_type_id')
if type_id is None:
return {}
volume_type = volume_types.get_volume_type(ctxt, type_id)
if volume_type is None:
return {}
extra_specs = volume_type.get('extra_specs', {})
log_extra_spec_warnings(extra_specs)
return extra_specs
def resolve_hostname(hostname):
"""Resolves host name to IP address."""
res = socket.getaddrinfo(hostname, None)[0]
family, socktype, proto, canonname, sockaddr = res
return sockaddr[0]
def round_down(value, precision):
return float(decimal.Decimal(six.text_type(value)).quantize(
decimal.Decimal(precision), rounding=decimal.ROUND_DOWN))
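# Illustrative sketch (not part of the original module): round_down truncates
# toward zero at the given precision string.  The helper name is hypothetical.
def _round_down_examples():
    assert round_down(5.567, '0.01') == 5.56
    assert round_down(5.999, '1') == 5.0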
def log_extra_spec_warnings(extra_specs):
for spec in (set(extra_specs.keys() if extra_specs else []) &
set(OBSOLETE_SSC_SPECS.keys())):
LOG.warning(_LW('Extra spec %(old)s is obsolete. Use %(new)s '
'instead.'), {'old': spec,
'new': OBSOLETE_SSC_SPECS[spec]})
for spec in (set(extra_specs.keys() if extra_specs else []) &
set(DEPRECATED_SSC_SPECS.keys())):
LOG.warning(_LW('Extra spec %(old)s is deprecated. Use %(new)s '
'instead.'), {'old': spec,
'new': DEPRECATED_SSC_SPECS[spec]})
def get_iscsi_connection_properties(lun_id, volume, iqn,
address, port):
properties = {}
properties['target_discovered'] = False
properties['target_portal'] = '%s:%s' % (address, port)
properties['target_iqn'] = iqn
properties['target_lun'] = int(lun_id)
properties['volume_id'] = volume['id']
auth = volume['provider_auth']
if auth:
(auth_method, auth_username, auth_secret) = auth.split()
properties['auth_method'] = auth_method
properties['auth_username'] = auth_username
properties['auth_password'] = auth_secret
return {
'driver_volume_type': 'iscsi',
'data': properties,
}
def validate_qos_spec(qos_spec):
"""Check validity of Cinder qos spec for our backend."""
if qos_spec is None:
return
normalized_qos_keys = [key.lower() for key in QOS_KEYS]
keylist = []
for key, value in qos_spec.items():
lower_case_key = key.lower()
if lower_case_key not in normalized_qos_keys:
msg = _('Unrecognized QOS keyword: "%s"') % key
raise exception.Invalid(msg)
keylist.append(lower_case_key)
# Modify the following check when we allow multiple settings in one spec.
if len(keylist) > 1:
msg = _('Only one limit can be set in a QoS spec.')
raise exception.Invalid(msg)
def get_volume_type_from_volume(volume):
"""Provides volume type associated with volume."""
type_id = volume.get('volume_type_id')
if type_id is None:
return {}
ctxt = context.get_admin_context()
return volume_types.get_volume_type(ctxt, type_id)
def map_qos_spec(qos_spec, volume):
"""Map Cinder QOS spec to limit/throughput-value as used in client API."""
if qos_spec is None:
return None
qos_spec = map_dict_to_lower(qos_spec)
spec = dict(policy_name=get_qos_policy_group_name(volume),
max_throughput=None)
# IOPS and BPS specifications are exclusive of one another.
    if 'maxiops' in qos_spec:
        spec['max_throughput'] = '%siops' % qos_spec['maxiops']
    elif 'total_iops_sec' in qos_spec:
        spec['max_throughput'] = '%siops' % qos_spec['total_iops_sec']
    elif 'maxbps' in qos_spec:
        spec['max_throughput'] = '%sB/s' % qos_spec['maxbps']
    elif 'total_bytes_sec' in qos_spec:
        spec['max_throughput'] = '%sB/s' % qos_spec['total_bytes_sec']
return spec
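# Illustrative sketch (not part of the original module): how a Cinder QoS spec
# is mapped for the backend, assuming a volume dict containing only an 'id'
# key.  The helper name is hypothetical.
def _map_qos_spec_example():
    spec = map_qos_spec({'maxIOPS': '4000'}, {'id': 'vol-1'})
    assert spec == {'policy_name': 'openstack-vol-1',
                    'max_throughput': '4000iops'}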
def map_dict_to_lower(input_dict):
"""Return an equivalent to the input dictionary with lower-case keys."""
lower_case_dict = {}
for key in input_dict:
lower_case_dict[key.lower()] = input_dict[key]
return lower_case_dict
def get_qos_policy_group_name(volume):
"""Return the name of backend QOS policy group based on its volume id."""
if 'id' in volume:
return OPENSTACK_PREFIX + volume['id']
return None
def get_qos_policy_group_name_from_info(qos_policy_group_info):
"""Return the name of a QOS policy group given qos policy group info."""
if qos_policy_group_info is None:
return None
legacy = qos_policy_group_info.get('legacy')
if legacy is not None:
return legacy['policy_name']
spec = qos_policy_group_info.get('spec')
if spec is not None:
return spec['policy_name']
return None
def get_valid_qos_policy_group_info(volume, extra_specs=None):
"""Given a volume, return information for QOS provisioning."""
info = dict(legacy=None, spec=None)
try:
volume_type = get_volume_type_from_volume(volume)
except KeyError:
LOG.exception(_LE('Cannot get QoS spec for volume %s.'), volume['id'])
return info
if volume_type is None:
return info
if extra_specs is None:
extra_specs = volume_type.get('extra_specs', {})
info['legacy'] = get_legacy_qos_policy(extra_specs)
info['spec'] = get_valid_backend_qos_spec_from_volume_type(volume,
volume_type)
msg = 'QoS policy group info for volume %(vol)s: %(info)s'
LOG.debug(msg, {'vol': volume['name'], 'info': info})
check_for_invalid_qos_spec_combination(info, volume_type)
return info
def get_valid_backend_qos_spec_from_volume_type(volume, volume_type):
"""Given a volume type, return the associated Cinder QoS spec."""
spec_key_values = get_backend_qos_spec_from_volume_type(volume_type)
if spec_key_values is None:
return None
validate_qos_spec(spec_key_values)
return map_qos_spec(spec_key_values, volume)
def get_backend_qos_spec_from_volume_type(volume_type):
qos_specs_id = volume_type.get('qos_specs_id')
if qos_specs_id is None:
return None
ctxt = context.get_admin_context()
qos_spec = qos_specs.get_qos_specs(ctxt, qos_specs_id)
if qos_spec is None:
return None
consumer = qos_spec['consumer']
# Front end QoS specs are handled by libvirt and we ignore them here.
if consumer not in BACKEND_QOS_CONSUMERS:
return None
spec_key_values = qos_spec['specs']
return spec_key_values
def check_for_invalid_qos_spec_combination(info, volume_type):
"""Invalidate QOS spec if both legacy and non-legacy info is present."""
if info['legacy'] and info['spec']:
msg = _('Conflicting QoS specifications in volume type '
'%s: when QoS spec is associated to volume '
'type, legacy "netapp:qos_policy_group" is not allowed in '
'the volume type extra specs.') % volume_type['id']
raise exception.Invalid(msg)
def get_legacy_qos_policy(extra_specs):
"""Return legacy qos policy information if present in extra specs."""
external_policy_name = extra_specs.get('netapp:qos_policy_group')
if external_policy_name is None:
return None
return dict(policy_name=external_policy_name)
class hashabledict(dict):
"""A hashable dictionary that is comparable (i.e. in unit tests, etc.)"""
def __hash__(self):
return hash(tuple(sorted(self.items())))
class OpenStackInfo(object):
"""OS/distribution, release, and version.
    NetApp uses these fields as content for EMS log entries.
"""
PACKAGE_NAME = 'python-cinder'
def __init__(self):
self._version = 'unknown version'
self._release = 'unknown release'
self._vendor = 'unknown vendor'
self._platform = 'unknown platform'
def _update_version_from_version_string(self):
try:
self._version = version.version_info.version_string()
except Exception:
pass
def _update_release_from_release_string(self):
try:
self._release = version.version_info.release_string()
except Exception:
pass
def _update_platform(self):
try:
self._platform = platform.platform()
except Exception:
pass
@staticmethod
def _get_version_info_version():
return version.version_info.version
@staticmethod
def _get_version_info_release():
return version.version_info.release
def _update_info_from_version_info(self):
try:
ver = self._get_version_info_version()
if ver:
self._version = ver
except Exception:
pass
try:
rel = self._get_version_info_release()
if rel:
self._release = rel
except Exception:
pass
# RDO, RHEL-OSP, Mirantis on Redhat, SUSE
def _update_info_from_rpm(self):
LOG.debug('Trying rpm command.')
try:
out, err = putils.execute("rpm", "-q", "--queryformat",
"'%{version}\t%{release}\t%{vendor}'",
self.PACKAGE_NAME)
if not out:
LOG.info(_LI('No rpm info found for %(pkg)s package.'), {
'pkg': self.PACKAGE_NAME})
return False
parts = out.split()
self._version = parts[0]
self._release = parts[1]
self._vendor = ' '.join(parts[2::])
return True
except Exception as e:
LOG.info(_LI('Could not run rpm command: %(msg)s.'), {'msg': e})
return False
# ubuntu, mirantis on ubuntu
def _update_info_from_dpkg(self):
LOG.debug('Trying dpkg-query command.')
try:
_vendor = None
out, err = putils.execute("dpkg-query", "-W", "-f='${Version}'",
self.PACKAGE_NAME)
if not out:
LOG.info(_LI('No dpkg-query info found for %(pkg)s package.'),
{'pkg': self.PACKAGE_NAME})
return False
# debian format: [epoch:]upstream_version[-debian_revision]
deb_version = out
# in case epoch or revision is missing, copy entire string
_release = deb_version
if ':' in deb_version:
deb_epoch, upstream_version = deb_version.split(':')
_release = upstream_version
if '-' in deb_version:
deb_revision = deb_version.split('-')[1]
_vendor = deb_revision
self._release = _release
if _vendor:
self._vendor = _vendor
return True
except Exception as e:
LOG.info(_LI('Could not run dpkg-query command: %(msg)s.'), {
'msg': e})
return False
def _update_openstack_info(self):
self._update_version_from_version_string()
self._update_release_from_release_string()
self._update_platform()
# some distributions override with more meaningful information
self._update_info_from_version_info()
# see if we have still more targeted info from rpm or apt
found_package = self._update_info_from_rpm()
if not found_package:
self._update_info_from_dpkg()
def info(self):
self._update_openstack_info()
return '%(version)s|%(release)s|%(vendor)s|%(platform)s' % {
'version': self._version, 'release': self._release,
'vendor': self._vendor, 'platform': self._platform}
class Features(object):
def __init__(self):
self.defined_features = set()
def add_feature(self, name, supported=True, min_version=None):
if not isinstance(supported, bool):
raise TypeError("Feature value must be a bool type.")
self.defined_features.add(name)
setattr(self, name, FeatureState(supported, min_version))
def __getattr__(self, name):
# NOTE(cknight): Needed to keep pylint happy.
raise AttributeError
class FeatureState(object):
def __init__(self, supported=True, minimum_version=None):
"""Represents the current state of enablement for a Feature
:param supported: True if supported, false otherwise
:param minimum_version: The minimum version that this feature is
suported at
"""
self.supported = supported
self.minimum_version = minimum_version
def __nonzero__(self):
"""Allow a FeatureState object to be tested for truth value
:return True if the feature is supported, otherwise False
"""
return self.supported
|
{
"content_hash": "d416bcb4674e6c9a2b26286295866c2d",
"timestamp": "",
"source": "github",
"line_count": 459,
"max_line_length": 78,
"avg_line_length": 34.84531590413943,
"alnum_prop": 0.6024759284731774,
"repo_name": "duhzecca/cinder",
"id": "d66e890ea22bb7a6df7834e3d3d18f103d095ced",
"size": "16796",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/netapp/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13073417"
},
{
"name": "Shell",
"bytes": "8222"
}
],
"symlink_target": ""
}
|
"""Pushsafer platform for notify component."""
import base64
import logging
import mimetypes
import requests
from requests.auth import HTTPBasicAuth
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TARGET,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
PLATFORM_SCHEMA,
BaseNotificationService,
)
_LOGGER = logging.getLogger(__name__)
_RESOURCE = "https://www.pushsafer.com/api"
_ALLOWED_IMAGES = ["image/gif", "image/jpeg", "image/png"]
CONF_DEVICE_KEY = "private_key"
CONF_TIMEOUT = 15
# Top level attributes in 'data'
ATTR_SOUND = "sound"
ATTR_VIBRATION = "vibration"
ATTR_ICON = "icon"
ATTR_ICONCOLOR = "iconcolor"
ATTR_URL = "url"
ATTR_URLTITLE = "urltitle"
ATTR_TIME2LIVE = "time2live"
ATTR_PRIORITY = "priority"
ATTR_RETRY = "retry"
ATTR_EXPIRE = "expire"
ATTR_ANSWER = "answer"
ATTR_PICTURE1 = "picture1"
# Attributes contained in picture1
ATTR_PICTURE1_URL = "url"
ATTR_PICTURE1_PATH = "path"
ATTR_PICTURE1_USERNAME = "username"
ATTR_PICTURE1_PASSWORD = "password"
ATTR_PICTURE1_AUTH = "auth"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_DEVICE_KEY): cv.string})
def get_service(hass, config, discovery_info=None):
"""Get the Pushsafer.com notification service."""
return PushsaferNotificationService(
config.get(CONF_DEVICE_KEY), hass.config.is_allowed_path
)
class PushsaferNotificationService(BaseNotificationService):
"""Implementation of the notification service for Pushsafer.com."""
def __init__(self, private_key, is_allowed_path):
"""Initialize the service."""
self._private_key = private_key
self.is_allowed_path = is_allowed_path
def send_message(self, message="", **kwargs):
"""Send a message to specified target."""
if kwargs.get(ATTR_TARGET) is None:
targets = ["a"]
_LOGGER.debug("No target specified. Sending push to all")
else:
targets = kwargs.get(ATTR_TARGET)
_LOGGER.debug("%s target(s) specified", len(targets))
title = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
data = kwargs.get(ATTR_DATA, {})
# Converting the specified image to base64
picture1 = data.get(ATTR_PICTURE1)
picture1_encoded = ""
if picture1 is not None:
_LOGGER.debug("picture1 is available")
url = picture1.get(ATTR_PICTURE1_URL, None)
local_path = picture1.get(ATTR_PICTURE1_PATH, None)
username = picture1.get(ATTR_PICTURE1_USERNAME)
password = picture1.get(ATTR_PICTURE1_PASSWORD)
auth = picture1.get(ATTR_PICTURE1_AUTH)
if url is not None:
_LOGGER.debug("Loading image from url %s", url)
picture1_encoded = self.load_from_url(url, username, password, auth)
elif local_path is not None:
_LOGGER.debug("Loading image from file %s", local_path)
picture1_encoded = self.load_from_file(local_path)
else:
_LOGGER.warning("missing url or local_path for picture1")
else:
_LOGGER.debug("picture1 is not specified")
payload = {
"k": self._private_key,
"t": title,
"m": message,
"s": data.get(ATTR_SOUND, ""),
"v": data.get(ATTR_VIBRATION, ""),
"i": data.get(ATTR_ICON, ""),
"c": data.get(ATTR_ICONCOLOR, ""),
"u": data.get(ATTR_URL, ""),
"ut": data.get(ATTR_URLTITLE, ""),
"l": data.get(ATTR_TIME2LIVE, ""),
"pr": data.get(ATTR_PRIORITY, ""),
"re": data.get(ATTR_RETRY, ""),
"ex": data.get(ATTR_EXPIRE, ""),
"a": data.get(ATTR_ANSWER, ""),
"p": picture1_encoded,
}
for target in targets:
payload["d"] = target
response = requests.post(_RESOURCE, data=payload, timeout=CONF_TIMEOUT)
if response.status_code != 200:
_LOGGER.error("Pushsafer failed with: %s", response.text)
else:
_LOGGER.debug("Push send: %s", response.json())
@classmethod
def get_base64(cls, filebyte, mimetype):
"""Convert the image to the expected base64 string of pushsafer."""
if mimetype not in _ALLOWED_IMAGES:
_LOGGER.warning("%s is a not supported mimetype for images", mimetype)
return None
base64_image = base64.b64encode(filebyte).decode("utf8")
return "data:{};base64,{}".format(mimetype, base64_image)
def load_from_url(self, url=None, username=None, password=None, auth=None):
"""Load image/document/etc from URL."""
if url is not None:
_LOGGER.debug("Downloading image from %s", url)
if username is not None and password is not None:
auth_ = HTTPBasicAuth(username, password)
response = requests.get(url, auth=auth_, timeout=CONF_TIMEOUT)
else:
response = requests.get(url, timeout=CONF_TIMEOUT)
return self.get_base64(response.content, response.headers["content-type"])
_LOGGER.warning("url not found in param")
return None
def load_from_file(self, local_path=None):
"""Load image/document/etc from a local path."""
try:
if local_path is not None:
_LOGGER.debug("Loading image from local path")
if self.is_allowed_path(local_path):
file_mimetype = mimetypes.guess_type(local_path)
_LOGGER.debug("Detected mimetype %s", file_mimetype)
with open(local_path, "rb") as binary_file:
data = binary_file.read()
return self.get_base64(data, file_mimetype[0])
else:
_LOGGER.warning("Local path not found in params!")
except OSError as error:
_LOGGER.error("Can't load from local path: %s", error)
return None
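# Illustrative sketch (not part of the integration): how get_base64 above turns
# raw image bytes into the data URI string placed in the "p" field of the
# Pushsafer payload.  The byte string and helper name are placeholders.
def _get_base64_example():
    encoded = PushsaferNotificationService.get_base64(b"\x89PNG...", "image/png")
    # encoded has the form "data:image/png;base64,<base64-encoded bytes>"
    return encoded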
|
{
"content_hash": "cd0b7cea2eb96f48bfd484a1e9096ed5",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 86,
"avg_line_length": 36.544910179640716,
"alnum_prop": 0.6006881861379649,
"repo_name": "fbradyirl/home-assistant",
"id": "461b2540beff454e6abb3d42c12157661a25ff96",
"size": "6103",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/pushsafer/notify.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "16494727"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17784"
}
],
"symlink_target": ""
}
|
import argparse
import os
import sys
from bakery_cli.ttfont import Font
from bakery_cli.scripts.fstype import reset_fstype
from fontTools.ttLib import TTLibError
parser = argparse.ArgumentParser()
parser.add_argument('filename',
help="Font file in OpenType (TTF/OTF) format")
parser.add_argument('--autofix', action="store_true",
help="Autofix font metrics")
args = parser.parse_args()
assert os.path.exists(args.filename)
if args.autofix:
reset_fstype(args.filename)
else:
try:
font = Font(args.filename)
except TTLibError, ex:
print >> sys.stderr, "ERROR: %s" % ex
exit(1)
print(font.fstype)
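# Illustrative usage (not part of the original script); the font path below is
# a placeholder:
#   python bakery-fstype-fix.py path/to/Font.ttf            # print the fsType value
#   python bakery-fstype-fix.py path/to/Font.ttf --autofix  # reset the fsType flag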
|
{
"content_hash": "22a0354e103b2fe8347900d027310d37",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 66,
"avg_line_length": 25.14814814814815,
"alnum_prop": 0.6730486008836525,
"repo_name": "lowks/fontbakery-cli",
"id": "89f4bc34abc6ed2bb39f274fd44ab5050e592d85",
"size": "1399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/bakery-fstype-fix.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from __future__ import with_statement
import ConfigParser
import copy
import errno
import glob
import logging
import os
import pprint
import re
import shutil
import signal
import subprocess
import sys
import tempfile
import thread
import threading
import time
import traceback
import types
import unittest.case
from collections import OrderedDict
from subprocess import CalledProcessError
from unittest import TestCase
import ccmlib.repository
from cassandra import ConsistencyLevel
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster as PyCluster
from cassandra.cluster import NoHostAvailable
from cassandra.policies import RetryPolicy, WhiteListRoundRobinPolicy
from ccmlib.cluster import Cluster
from ccmlib.cluster_factory import ClusterFactory
from ccmlib.common import get_version_from_build, is_win
from ccmlib.node import TimeoutError
from nose.exc import SkipTest
from six import print_
from plugins.dtestconfig import _CONFIG as CONFIG
# We don't want test files to know about the plugins module, so we import
# constants here and re-export them.
from plugins.dtestconfig import GlobalConfigObject
from utils.funcutils import merge_dicts
LOG_SAVED_DIR = "logs"
try:
os.mkdir(LOG_SAVED_DIR)
except OSError:
pass
LAST_LOG = os.path.join(LOG_SAVED_DIR, "last")
LAST_TEST_DIR = 'last_test_dir'
DEFAULT_DIR = './'
config = ConfigParser.RawConfigParser()
if len(config.read(os.path.expanduser('~/.cassandra-dtest'))) > 0:
if config.has_option('main', 'default_dir'):
DEFAULT_DIR = os.path.expanduser(config.get('main', 'default_dir'))
CASSANDRA_DIR = os.environ.get('CASSANDRA_DIR', DEFAULT_DIR)
NO_SKIP = os.environ.get('SKIP', '').lower() in ('no', 'false')
DEBUG = os.environ.get('DEBUG', '').lower() in ('yes', 'true')
TRACE = os.environ.get('TRACE', '').lower() in ('yes', 'true')
KEEP_LOGS = os.environ.get('KEEP_LOGS', '').lower() in ('yes', 'true')
KEEP_TEST_DIR = os.environ.get('KEEP_TEST_DIR', '').lower() in ('yes', 'true')
PRINT_DEBUG = os.environ.get('PRINT_DEBUG', '').lower() in ('yes', 'true')
OFFHEAP_MEMTABLES = os.environ.get('OFFHEAP_MEMTABLES', '').lower() in ('yes', 'true')
NUM_TOKENS = os.environ.get('NUM_TOKENS', '256')
RECORD_COVERAGE = os.environ.get('RECORD_COVERAGE', '').lower() in ('yes', 'true')
REUSE_CLUSTER = os.environ.get('REUSE_CLUSTER', '').lower() in ('yes', 'true')
SILENCE_DRIVER_ON_SHUTDOWN = os.environ.get('SILENCE_DRIVER_ON_SHUTDOWN', 'true').lower() in ('yes', 'true')
IGNORE_REQUIRE = os.environ.get('IGNORE_REQUIRE', '').lower() in ('yes', 'true')
DATADIR_COUNT = os.environ.get('DATADIR_COUNT', '3')
ENABLE_ACTIVE_LOG_WATCHING = os.environ.get('ENABLE_ACTIVE_LOG_WATCHING', '').lower() in ('yes', 'true')
RUN_STATIC_UPGRADE_MATRIX = os.environ.get('RUN_STATIC_UPGRADE_MATRIX', '').lower() in ('yes', 'true')
# default values for configuration from the configuration plugin
_default_config = GlobalConfigObject(
vnodes=True,
)
if CONFIG is None:
CONFIG = _default_config
DISABLE_VNODES = not CONFIG.vnodes
if os.environ.get('DISABLE_VNODES', '').lower() in ('yes', 'true'):
    print 'DISABLE_VNODES environment variable is deprecated. Use `./run_dtests.py --vnodes false` instead.'
CURRENT_TEST = ""
logging.basicConfig(filename=os.path.join(LOG_SAVED_DIR, "dtest.log"),
filemode='w',
format='%(asctime)s,%(msecs)d %(name)s %(current_test)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.DEBUG)
LOG = logging.getLogger('dtest')
# set python-driver log level to INFO by default for dtest
logging.getLogger('cassandra').setLevel(logging.INFO)
def get_sha(repo_dir):
try:
output = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=repo_dir).strip()
prefix = 'git:'
if os.environ.get('LOCAL_GIT_REPO') is not None:
prefix = 'local:'
return "{}{}".format(prefix, output)
except CalledProcessError, e:
if re.search('Not a git repository', e.message) is not None:
# we tried to get a sha, but repo_dir isn't a git repo. No big deal, must just be working from a non-git install.
return None
else:
# git call failed for some unknown reason
raise
# There are times when we want to know the C* version we're testing against
# before we call Tester.setUp. In the general case, we can't know that -- the
# test method could use any version it wants for self.cluster. However, we can
# get the version from build.xml in the C* repository specified by
# CASSANDRA_VERSION or CASSANDRA_DIR. This should use the same resolution
# strategy as the actual checkout code in Tester.setUp; if it does not, that is
# a bug.
_cassandra_version_slug = os.environ.get('CASSANDRA_VERSION')
# Prefer CASSANDRA_VERSION if it's set in the environment. If not, use CASSANDRA_DIR
if _cassandra_version_slug:
# fetch but don't build the specified C* version
ccm_repo_cache_dir, _ = ccmlib.repository.setup(_cassandra_version_slug)
CASSANDRA_VERSION_FROM_BUILD = get_version_from_build(ccm_repo_cache_dir)
CASSANDRA_GITREF = get_sha(ccm_repo_cache_dir) # will be set None when not a git repo
else:
CASSANDRA_VERSION_FROM_BUILD = get_version_from_build(CASSANDRA_DIR)
CASSANDRA_GITREF = get_sha(CASSANDRA_DIR)
# Determine the location of the libjemalloc jar so that we can specify it
# through environment variables when start Cassandra. This reduces startup
# time, making the dtests run faster.
def find_libjemalloc():
if is_win():
# let the normal bat script handle finding libjemalloc
return ""
this_dir = os.path.dirname(os.path.realpath(__file__))
script = os.path.join(this_dir, "findlibjemalloc.sh")
try:
p = subprocess.Popen([script], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if stderr or not stdout:
return "-" # tells C* not to look for libjemalloc
else:
return stdout
except Exception as exc:
print "Failed to run script to prelocate libjemalloc ({}): {}".format(script, exc)
return ""
CASSANDRA_LIBJEMALLOC = find_libjemalloc()
class expect_control_connection_failures(object):
"""
We're just using a class here as a one-off object with a filter method, for
use as a filter object in the driver logger. It's frustrating that we can't
just pass in a function, but we need an object with a .filter method. Oh
well, I guess that's what old stdlib libraries are like.
"""
@staticmethod
def filter(record):
expected_strings = [
'Control connection failed to connect, shutting down Cluster:',
'[control connection] Error connecting to '
]
for s in expected_strings:
if s in record.msg or s in record.name:
return False
return True
# copy the initial environment variables so we can reset them later:
initial_environment = copy.deepcopy(os.environ)
class DtestTimeoutError(Exception):
pass
def reset_environment_vars():
os.environ.clear()
os.environ.update(initial_environment)
def warning(msg):
LOG.warning(msg, extra={"current_test": CURRENT_TEST})
if PRINT_DEBUG:
print "WARN: " + msg
def debug(msg):
LOG.debug(msg, extra={"current_test": CURRENT_TEST})
if PRINT_DEBUG:
print msg
def retry_till_success(fun, *args, **kwargs):
timeout = kwargs.pop('timeout', 60)
bypassed_exception = kwargs.pop('bypassed_exception', Exception)
deadline = time.time() + timeout
while True:
try:
return fun(*args, **kwargs)
except bypassed_exception:
if time.time() > deadline:
raise
else:
# brief pause before next attempt
time.sleep(0.25)
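# Illustrative sketch (not part of the original module): retrying a flaky call
# until it succeeds or the timeout elapses.  The function below is hypothetical
# and only demonstrates the keyword arguments consumed by retry_till_success.
def _retry_till_success_example():
    attempts = {'n': 0}
    def flaky_read():
        attempts['n'] += 1
        if attempts['n'] < 3:
            raise IOError("not ready yet")  # stand-in for a node still starting
        return "ok"
    # retries every 0.25s on IOError, for at most 10 seconds
    return retry_till_success(flaky_read, timeout=10, bypassed_exception=IOError)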
class FlakyRetryPolicy(RetryPolicy):
"""
A retry policy that retries 5 times by default, but can be configured to
retry more times.
"""
def __init__(self, max_retries=5):
self.max_retries = max_retries
def on_read_timeout(self, *args, **kwargs):
if kwargs['retry_num'] < self.max_retries:
debug("Retrying read after timeout. Attempt #" + str(kwargs['retry_num']))
return (self.RETRY, None)
else:
return (self.RETHROW, None)
def on_write_timeout(self, *args, **kwargs):
if kwargs['retry_num'] < self.max_retries:
debug("Retrying write after timeout. Attempt #" + str(kwargs['retry_num']))
return (self.RETRY, None)
else:
return (self.RETHROW, None)
def on_unavailable(self, *args, **kwargs):
if kwargs['retry_num'] < self.max_retries:
debug("Retrying request after UE. Attempt #" + str(kwargs['retry_num']))
return (self.RETRY, None)
else:
return (self.RETHROW, None)
class Runner(threading.Thread):
def __init__(self, func):
threading.Thread.__init__(self)
self.__func = func
self.__error = None
self.__stopped = False
self.daemon = True
def run(self):
i = 0
while True:
if self.__stopped:
return
try:
self.__func(i)
except Exception as e:
self.__error = e
return
i = i + 1
def stop(self):
self.__stopped = True
self.join()
if self.__error is not None:
raise self.__error
def check(self):
if self.__error is not None:
raise self.__error
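# Illustrative sketch (not part of the original module): Runner repeatedly
# invokes the supplied function with an increasing counter on a daemon thread
# until stop() is called; stop()/check() re-raise any error the function hit.
# The helper name, keyspace and table below are hypothetical.
def _runner_example(session):
    writer = Runner(lambda i: session.execute(
        "INSERT INTO ks.t (k, v) VALUES (%d, %d)" % (i, i)))
    writer.start()
    # ... exercise the cluster while writes continue in the background ...
    writer.stop()  # joins the thread and re-raises any captured exception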
class Tester(TestCase):
maxDiff = None
def __init__(self, *argv, **kwargs):
# if False, then scan the log of each node for errors after every test.
self.allow_log_errors = False
self.cluster_options = kwargs.pop('cluster_options', None)
super(Tester, self).__init__(*argv, **kwargs)
def set_node_to_current_version(self, node):
version = os.environ.get('CASSANDRA_VERSION')
cdir = CASSANDRA_DIR
if version:
node.set_install_dir(version=version)
else:
node.set_install_dir(install_dir=cdir)
def init_config(self):
init_default_config(self.cluster, self.cluster_options)
def setUp(self):
self.set_current_tst_name()
kill_windows_cassandra_procs()
maybe_cleanup_cluster_from_last_test_file()
self.test_path = get_test_path()
self.cluster = create_ccm_cluster(self.test_path, name='test')
self.maybe_begin_active_log_watch()
maybe_setup_jacoco(self.test_path)
self.init_config()
write_last_test_file(self.test_path, self.cluster)
set_log_levels(self.cluster)
self.connections = []
self.runners = []
# this is intentionally spelled 'tst' instead of 'test' to avoid
# making unittest think it's a test method
def set_current_tst_name(self):
global CURRENT_TEST
CURRENT_TEST = self.id() + self._testMethodName
def maybe_begin_active_log_watch(self):
if ENABLE_ACTIVE_LOG_WATCHING:
if not self.allow_log_errors:
self.begin_active_log_watch()
def begin_active_log_watch(self):
"""
Calls into ccm to start actively watching logs.
In the event that errors are seen in logs, ccm will call back to _log_error_handler.
When the cluster is no longer in use, stop_active_log_watch should be called to end log watching.
(otherwise a 'daemon' thread will (needlessly) run until the process exits).
"""
# log watching happens in another thread, but we want it to halt the main
# thread's execution, which we have to do by registering a signal handler
signal.signal(signal.SIGINT, self._catch_interrupt)
self._log_watch_thread = self.cluster.actively_watch_logs_for_error(self._log_error_handler, interval=0.25)
def _log_error_handler(self, errordata):
"""
Callback handler used in conjunction with begin_active_log_watch.
When called, prepares exception instance, then will indirectly
cause _catch_interrupt to be called, which can raise the exception in the main
program thread.
        @param errordata is a dictionary mapping node name to failure list.
"""
# in some cases self.allow_log_errors may get set after proactive log checking has been enabled
# so we need to double-check first thing before proceeding
if self.allow_log_errors:
return
reportable_errordata = OrderedDict()
for nodename, errors in errordata.items():
filtered_errors = list(self.__filter_errors(['\n'.join(msg) for msg in errors]))
            if len(filtered_errors) != 0:
reportable_errordata[nodename] = filtered_errors
# no errors worthy of halting the test
if not reportable_errordata:
return
message = "Errors seen in logs for: {nodes}".format(nodes=", ".join(reportable_errordata.keys()))
for nodename, errors in reportable_errordata.items():
for error in errors:
message += "\n{nodename}: {error}".format(nodename=nodename, error=error)
try:
debug('Errors were just seen in logs, ending test (if not ending already)!')
print_("Error details: \n{message}".format(message=message))
self.test_is_ending # will raise AttributeError if not present
except AttributeError:
self.test_is_ending = True
self.exit_with_exception = AssertionError("Log error encountered during active log scanning, see stdout")
# thread.interrupt_main will SIGINT in the main thread, which we can
# catch to raise an exception with useful information
thread.interrupt_main()
"""
Finds files matching the glob pattern specified as argument on
the given keyspace in all nodes
"""
def glob_data_dirs(self, path, ks="ks"):
result = []
for node in self.cluster.nodelist():
for data_dir in node.data_directories():
ks_dir = os.path.join(data_dir, ks, path)
result.extend(glob.glob(ks_dir))
return result
def _catch_interrupt(self, signal, frame):
"""
Signal handler for registering on SIGINT.
If called will look for a stored exception and raise it to abort test.
If a stored exception is not present, this handler has likely caught a
user interrupt via CTRL-C, and will raise a KeyboardInterrupt.
"""
try:
# check if we have a persisted exception to fail with
raise self.exit_with_exception
except AttributeError:
# looks like this was just a plain CTRL-C event
raise KeyboardInterrupt()
def copy_logs(self, cluster, directory=None, name=None):
"""Copy the current cluster's log files somewhere, by default to LOG_SAVED_DIR with a name of 'last'"""
if directory is None:
directory = LOG_SAVED_DIR
if name is None:
name = LAST_LOG
else:
name = os.path.join(directory, name)
if not os.path.exists(directory):
os.mkdir(directory)
logs = [(node.name, node.logfilename(), node.debuglogfilename(), node.gclogfilename(), node.compactionlogfilename())
for node in self.cluster.nodes.values()]
        if len(logs) != 0:
basedir = str(int(time.time() * 1000)) + '_' + self.id()
logdir = os.path.join(directory, basedir)
os.mkdir(logdir)
for n, log, debuglog, gclog, compactionlog in logs:
if os.path.exists(log):
self.assertGreaterEqual(os.path.getsize(log), 0)
shutil.copyfile(log, os.path.join(logdir, n + ".log"))
if os.path.exists(debuglog):
self.assertGreaterEqual(os.path.getsize(debuglog), 0)
shutil.copyfile(debuglog, os.path.join(logdir, n + "_debug.log"))
if os.path.exists(gclog):
self.assertGreaterEqual(os.path.getsize(gclog), 0)
shutil.copyfile(gclog, os.path.join(logdir, n + "_gc.log"))
if os.path.exists(compactionlog):
self.assertGreaterEqual(os.path.getsize(compactionlog), 0)
shutil.copyfile(compactionlog, os.path.join(logdir, n + "_compaction.log"))
if os.path.exists(name):
os.unlink(name)
if not is_win():
os.symlink(basedir, name)
def get_eager_protocol_version(self, cassandra_version):
"""
Returns the highest protocol version accepted
by the given C* version
"""
if cassandra_version >= '2.2':
protocol_version = 4
elif cassandra_version >= '2.1':
protocol_version = 3
elif cassandra_version >= '2.0':
protocol_version = 2
else:
protocol_version = 1
return protocol_version
def cql_connection(self, node, keyspace=None, user=None,
password=None, compression=True, protocol_version=None, port=None, ssl_opts=None):
return self._create_session(node, keyspace, user, password, compression,
protocol_version, port=port, ssl_opts=ssl_opts)
def exclusive_cql_connection(self, node, keyspace=None, user=None,
password=None, compression=True, protocol_version=None, port=None, ssl_opts=None):
node_ip = self.get_ip_from_node(node)
wlrr = WhiteListRoundRobinPolicy([node_ip])
return self._create_session(node, keyspace, user, password, compression,
protocol_version, wlrr, port=port, ssl_opts=ssl_opts)
def _create_session(self, node, keyspace, user, password, compression, protocol_version, load_balancing_policy=None,
port=None, ssl_opts=None):
node_ip = self.get_ip_from_node(node)
if not port:
port = self.get_port_from_node(node)
if protocol_version is None:
protocol_version = self.get_eager_protocol_version(self.cluster.version())
if user is not None:
auth_provider = self.get_auth_provider(user=user, password=password)
else:
auth_provider = None
cluster = PyCluster([node_ip], auth_provider=auth_provider, compression=compression,
protocol_version=protocol_version, load_balancing_policy=load_balancing_policy, default_retry_policy=FlakyRetryPolicy(),
port=port, ssl_options=ssl_opts, connect_timeout=10)
session = cluster.connect()
# temporarily increase client-side timeout to 1m to determine
# if the cluster is simply responding slowly to requests
session.default_timeout = 60.0
if keyspace is not None:
session.set_keyspace(keyspace)
# override driver default consistency level of LOCAL_QUORUM
session.default_consistency_level = ConsistencyLevel.ONE
self.connections.append(session)
return session
def patient_cql_connection(self, node, keyspace=None,
user=None, password=None, timeout=30, compression=True,
protocol_version=None, port=None, ssl_opts=None):
"""
Returns a connection after it stops throwing NoHostAvailables due to not being ready.
If the timeout is exceeded, the exception is raised.
"""
if is_win():
timeout *= 2
logging.getLogger('cassandra.cluster').addFilter(expect_control_connection_failures)
try:
session = retry_till_success(
self.cql_connection,
node,
keyspace=keyspace,
user=user,
password=password,
timeout=timeout,
compression=compression,
protocol_version=protocol_version,
port=port,
ssl_opts=ssl_opts,
bypassed_exception=NoHostAvailable
)
finally:
logging.getLogger('cassandra.cluster').removeFilter(expect_control_connection_failures)
return session
def patient_exclusive_cql_connection(self, node, keyspace=None,
user=None, password=None, timeout=30, compression=True,
protocol_version=None, port=None, ssl_opts=None):
"""
Returns a connection after it stops throwing NoHostAvailables due to not being ready.
If the timeout is exceeded, the exception is raised.
"""
if is_win():
timeout *= 2
return retry_till_success(
self.exclusive_cql_connection,
node,
keyspace=keyspace,
user=user,
password=password,
timeout=timeout,
compression=compression,
protocol_version=protocol_version,
port=port,
ssl_opts=ssl_opts,
bypassed_exception=NoHostAvailable
)
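    # retry_till_success is defined elsewhere in this codebase; a minimal
    # sketch of the contract assumed here (illustrative, not the actual
    # implementation): call fn repeatedly, swallowing bypassed_exception,
    # until it succeeds or the timeout elapses.
    #
    #     def retry_till_success(fn, *args, **kwargs):
    #         timeout = kwargs.pop('timeout', 60)
    #         bypassed_exception = kwargs.pop('bypassed_exception', Exception)
    #         deadline = time.time() + timeout
    #         while True:
    #             try:
    #                 return fn(*args, **kwargs)
    #             except bypassed_exception:
    #                 if time.time() > deadline:
    #                     raise
    #                 time.sleep(0.25)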
def create_ks(self, session, name, rf):
query = 'CREATE KEYSPACE %s WITH replication={%s}'
if isinstance(rf, types.IntType):
# we assume simpleStrategy
session.execute(query % (name, "'class':'SimpleStrategy', 'replication_factor':%d" % rf))
else:
self.assertGreaterEqual(len(rf), 0, "At least one datacenter/rf pair is needed")
# we assume networkTopologyStrategy
options = (', ').join(['\'%s\':%d' % (d, r) for d, r in rf.iteritems()])
session.execute(query % (name, "'class':'NetworkTopologyStrategy', %s" % options))
session.execute('USE {}'.format(name))
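    # Illustrative examples (hypothetical session): create_ks(session, 'ks', 3)
    # issues
    #     CREATE KEYSPACE ks WITH replication=
    #         {'class':'SimpleStrategy', 'replication_factor':3}
    # while passing a dict such as {'dc1': 2, 'dc2': 1} selects
    # NetworkTopologyStrategy with one entry per datacenter instead.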
# We default to UTF8Type because it's simpler to use in tests
def create_cf(self, session, name, key_type="varchar", speculative_retry=None, read_repair=None, compression=None,
gc_grace=None, columns=None, validation="UTF8Type", compact_storage=False):
additional_columns = ""
if columns is not None:
for k, v in columns.items():
additional_columns = "{}, {} {}".format(additional_columns, k, v)
if additional_columns == "":
query = 'CREATE COLUMNFAMILY %s (key %s, c varchar, v varchar, PRIMARY KEY(key, c)) WITH comment=\'test cf\'' % (name, key_type)
else:
query = 'CREATE COLUMNFAMILY %s (key %s PRIMARY KEY%s) WITH comment=\'test cf\'' % (name, key_type, additional_columns)
if compression is not None:
query = '%s AND compression = { \'sstable_compression\': \'%sCompressor\' }' % (query, compression)
else:
# if a compression option is omitted, C* will default to lz4 compression
query += ' AND compression = {}'
if read_repair is not None:
query = '%s AND read_repair_chance=%f AND dclocal_read_repair_chance=%f' % (query, read_repair, read_repair)
if gc_grace is not None:
query = '%s AND gc_grace_seconds=%d' % (query, gc_grace)
if speculative_retry is not None:
query = '%s AND speculative_retry=\'%s\'' % (query, speculative_retry)
if compact_storage:
query += ' AND COMPACT STORAGE'
session.execute(query)
time.sleep(0.2)
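    # Illustrative example (hypothetical call): create_cf(session, 'cf1',
    # columns={'v2': 'int'}, gc_grace=0) builds and executes roughly:
    #
    #     CREATE COLUMNFAMILY cf1 (key varchar PRIMARY KEY, v2 int)
    #         WITH comment='test cf' AND compression = {} AND gc_grace_seconds=0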
@classmethod
def tearDownClass(cls):
reset_environment_vars()
if os.path.exists(LAST_TEST_DIR):
with open(LAST_TEST_DIR) as f:
test_path = f.readline().strip('\n')
name = f.readline()
try:
cluster = ClusterFactory.load(test_path, name)
# Avoid waiting too long for node to be marked down
if KEEP_TEST_DIR:
cluster.stop(gently=RECORD_COVERAGE)
else:
cluster.remove()
os.rmdir(test_path)
except IOError:
# after a restart, /tmp will be emptied so we'll get an IOError when loading the old cluster here
pass
try:
os.remove(LAST_TEST_DIR)
except IOError:
# Ignore - see comment above
pass
def tearDown(self):
# test_is_ending prevents active log watching from being able to interrupt the test
# which we don't want to happen once tearDown begins
self.test_is_ending = True
reset_environment_vars()
for con in self.connections:
con.cluster.shutdown()
for runner in self.runners:
try:
runner.stop()
            except Exception:
                # best-effort stop; the runner may already be stopped
                pass
failed = did_fail()
try:
if not self.allow_log_errors and self.check_logs_for_errors():
failed = True
raise AssertionError('Unexpected error in log, see stdout')
finally:
try:
# save the logs for inspection
if failed or KEEP_LOGS:
self.copy_logs(self.cluster)
except Exception as e:
print "Error saving log:", str(e)
finally:
log_watch_thread = getattr(self, '_log_watch_thread', None)
cleanup_cluster(self.cluster, self.test_path, log_watch_thread)
def check_logs_for_errors(self):
for node in self.cluster.nodelist():
errors = list(self.__filter_errors(
['\n'.join(msg) for msg in node.grep_log_for_errors()]))
            if len(errors) != 0:
for error in errors:
print_("Unexpected error in {node_name} log, error: \n{error}".format(node_name=node.name, error=error))
return True
def go(self, func):
runner = Runner(func)
self.runners.append(runner)
runner.start()
return runner
def skip(self, msg):
if not NO_SKIP:
raise SkipTest(msg)
def __filter_errors(self, errors):
"""Filter errors, removing those that match self.ignore_log_patterns"""
if not hasattr(self, 'ignore_log_patterns'):
self.ignore_log_patterns = []
for e in errors:
for pattern in self.ignore_log_patterns:
if re.search(pattern, e):
break
else:
yield e
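    # The for/else above is the standard search idiom: the else clause runs
    # only when the inner loop did not break, i.e. when no ignore pattern
    # matched, so only unmatched errors are yielded. A standalone sketch with
    # made-up values:
    #
    #     for e in ['known harmless warning', 'real failure']:
    #         for pattern in ['harmless']:
    #             if re.search(pattern, e):
    #                 break
    #         else:
    #             print_(e)  # prints only 'real failure'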
def get_ip_from_node(self, node):
if node.network_interfaces['binary']:
node_ip = node.network_interfaces['binary'][0]
else:
node_ip = node.network_interfaces['thrift'][0]
return node_ip
def get_port_from_node(self, node):
"""
Return the port that this node is listening on.
We only use this to connect the native driver,
so we only care about the binary port.
"""
try:
return node.network_interfaces['binary'][1]
except Exception:
raise RuntimeError("No network interface defined on this node object. {}".format(node.network_interfaces))
def get_auth_provider(self, user, password):
return PlainTextAuthProvider(username=user, password=password)
def make_auth(self, user, password):
def private_auth(node_ip):
return {'username': user, 'password': password}
return private_auth
# Disable docstrings printing in nosetest output
def shortDescription(self):
return None
def wait_for_any_log(self, nodes, pattern, timeout, filename='system.log'):
"""
Look for a pattern in the system.log of any in a given list
of nodes.
@param nodes The list of nodes whose logs to scan
@param pattern The target pattern
@param timeout How long to wait for the pattern. Note that
strictly speaking, timeout is not really a timeout,
but a maximum number of attempts. This implies that
                       all the grepping takes no time at all, so it is
somewhat inaccurate, but probably close enough.
@return The first node in whose log the pattern was found
"""
for _ in range(timeout):
for node in nodes:
found = node.grep_log(pattern, filename=filename)
if found:
return node
time.sleep(1)
raise TimeoutError(time.strftime("%d %b %Y %H:%M:%S", time.gmtime()) +
" Unable to find: " + pattern + " in any node log within " + str(timeout) + "s")
def get_jfr_jvm_args(self):
"""
@return The JVM arguments required for attaching flight recorder to a Java process.
"""
return ["-XX:+UnlockCommercialFeatures", "-XX:+FlightRecorder"]
def start_jfr_recording(self, nodes):
"""
Start Java flight recorder provided the cluster was started with the correct jvm arguments.
"""
for node in nodes:
p = subprocess.Popen(['jcmd', str(node.pid), 'JFR.start'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
debug(stdout)
debug(stderr)
def dump_jfr_recording(self, nodes):
"""
Save Java flight recorder results to file for analyzing with mission control.
"""
for node in nodes:
p = subprocess.Popen(['jcmd', str(node.pid), 'JFR.dump',
'recording=1', 'filename=recording_{}.jfr'.format(node.address())],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
debug(stdout)
debug(stderr)
def kill_windows_cassandra_procs():
# On Windows, forcefully terminate any leftover previously running cassandra processes. This is a temporary
# workaround until we can determine the cause of intermittent hung-open tests and file-handles.
if is_win():
try:
import psutil
for proc in psutil.process_iter():
try:
pinfo = proc.as_dict(attrs=['pid', 'name', 'cmdline'])
except psutil.NoSuchProcess:
pass
else:
if (pinfo['name'] == 'java.exe' and '-Dcassandra' in pinfo['cmdline']):
                        print_('Found running cassandra process with pid: ' + str(pinfo['pid']) + '. Killing.')
psutil.Process(pinfo['pid']).kill()
except ImportError:
debug("WARN: psutil not installed. Cannot detect and kill "
"running cassandra processes - you may see cascading dtest failures.")
def get_test_path():
test_path = tempfile.mkdtemp(prefix='dtest-')
# ccm on cygwin needs absolute path to directory - it crosses from cygwin space into
# regular Windows space on wmic calls which will otherwise break pathing
if sys.platform == "cygwin":
process = subprocess.Popen(["cygpath", "-m", test_path], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
test_path = process.communicate()[0].rstrip()
return test_path
# nose will discover this as a test, so we manually make it not a test
get_test_path.__test__ = False
def create_ccm_cluster(test_path, name):
debug("cluster ccm directory: " + test_path)
version = os.environ.get('CASSANDRA_VERSION')
cdir = CASSANDRA_DIR
if version:
cluster = Cluster(test_path, name, cassandra_version=version)
else:
cluster = Cluster(test_path, name, cassandra_dir=cdir)
if DISABLE_VNODES:
cluster.set_configuration_options(values={'num_tokens': None})
else:
cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': NUM_TOKENS})
if OFFHEAP_MEMTABLES:
cluster.set_configuration_options(values={'memtable_allocation_type': 'offheap_objects'})
cluster.set_datadir_count(DATADIR_COUNT)
cluster.set_environment_variable('CASSANDRA_LIBJEMALLOC', CASSANDRA_LIBJEMALLOC)
return cluster
def cleanup_cluster(cluster, test_path, log_watch_thread=None):
if SILENCE_DRIVER_ON_SHUTDOWN:
# driver logging is very verbose when nodes start going down -- bump up the level
logging.getLogger('cassandra').setLevel(logging.CRITICAL)
if KEEP_TEST_DIR:
cluster.stop(gently=RECORD_COVERAGE)
else:
# when recording coverage the jvm has to exit normally
# or the coverage information is not written by the jacoco agent
# otherwise we can just kill the process
if RECORD_COVERAGE:
cluster.stop(gently=True)
# Cleanup everything:
try:
if log_watch_thread:
stop_active_log_watch(log_watch_thread)
finally:
debug("removing ccm cluster {name} at: {path}".format(name=cluster.name, path=test_path))
cluster.remove()
debug("clearing ssl stores from [{0}] directory".format(test_path))
for filename in ('keystore.jks', 'truststore.jks', 'ccm_node.cer'):
try:
os.remove(os.path.join(test_path, filename))
except OSError as e:
                # once we port to py3, which has better reporting for exceptions raised while
                # handling other exceptions, we should just assert e.errno == errno.ENOENT
if e.errno != errno.ENOENT: # ENOENT = no such file or directory
raise
os.rmdir(test_path)
cleanup_last_test_dir()
def cleanup_last_test_dir():
if os.path.exists(LAST_TEST_DIR):
os.remove(LAST_TEST_DIR)
def stop_active_log_watch(log_watch_thread):
"""
Joins the log watching thread, which will then exit.
Should be called after each test, ideally after nodes are stopped but before cluster files are removed.
Can be called multiple times without error.
If not called, log watching thread will remain running until the parent process exits.
"""
log_watch_thread.join(timeout=60)
def maybe_cleanup_cluster_from_last_test_file():
# cleaning up if a previous execution didn't trigger tearDown (which
# can happen if it is interrupted by KeyboardInterrupt)
if os.path.exists(LAST_TEST_DIR):
with open(LAST_TEST_DIR) as f:
test_path = f.readline().strip('\n')
name = f.readline()
try:
cluster = ClusterFactory.load(test_path, name)
# Avoid waiting too long for node to be marked down
cleanup_cluster(cluster, test_path)
except IOError:
# after a restart, /tmp will be emptied so we'll get an IOError when loading the old cluster here
pass
def init_default_config(cluster, cluster_options):
# the failure detector can be quite slow in such tests with quick start/stop
phi_values = {'phi_convict_threshold': 5}
timeout = 10000
if cluster_options is not None:
values = merge_dicts(cluster_options, phi_values)
else:
values = merge_dicts(phi_values, {
'read_request_timeout_in_ms': timeout,
'range_request_timeout_in_ms': timeout,
'write_request_timeout_in_ms': timeout,
'truncate_request_timeout_in_ms': timeout,
'request_timeout_in_ms': timeout
})
cluster.set_configuration_options(values)
debug("Done setting configuration options:\n" + pprint.pformat(cluster._config_options, indent=4))
def write_last_test_file(test_path, cluster):
with open(LAST_TEST_DIR, 'w') as f:
f.write(test_path + '\n')
f.write(cluster.name)
def set_log_levels(cluster):
if DEBUG:
cluster.set_log_level("DEBUG")
if TRACE:
cluster.set_log_level("TRACE")
if os.environ.get('DEBUG', 'no').lower() not in ('no', 'false', 'yes', 'true'):
classes_to_debug = os.environ.get('DEBUG').split(":")
cluster.set_log_level('DEBUG', None if len(classes_to_debug) == 0 else classes_to_debug)
if os.environ.get('TRACE', 'no').lower() not in ('no', 'false', 'yes', 'true'):
classes_to_trace = os.environ.get('TRACE').split(":")
cluster.set_log_level('TRACE', None if len(classes_to_trace) == 0 else classes_to_trace)
def maybe_setup_jacoco(test_path, cluster_name='test'):
"""Setup JaCoCo code coverage support"""
if not RECORD_COVERAGE:
return
# use explicit agent and execfile locations
# or look for a cassandra build if they are not specified
cdir = CASSANDRA_DIR
agent_location = os.environ.get('JACOCO_AGENT_JAR', os.path.join(cdir, 'build/lib/jars/jacocoagent.jar'))
jacoco_execfile = os.environ.get('JACOCO_EXECFILE', os.path.join(cdir, 'build/jacoco/jacoco.exec'))
if os.path.isfile(agent_location):
debug("Jacoco agent found at {}".format(agent_location))
with open(os.path.join(
test_path, cluster_name, 'cassandra.in.sh'), 'w') as f:
f.write('JVM_OPTS="$JVM_OPTS -javaagent:{jar_path}=destfile={exec_file}"'
.format(jar_path=agent_location, exec_file=jacoco_execfile))
if os.path.isfile(jacoco_execfile):
debug("Jacoco execfile found at {}, execution data will be appended".format(jacoco_execfile))
else:
debug("Jacoco execfile will be created at {}".format(jacoco_execfile))
else:
debug("Jacoco agent not found or is not file. Execution will not be recorded.")
def did_fail():
if sys.exc_info() == (None, None, None):
return False
exc_class, _, _ = sys.exc_info()
return not issubclass(exc_class, unittest.case.SkipTest)
class ReusableClusterTester(Tester):
"""
A Tester designed for reusing the same cluster across multiple
test methods. This makes test suites with many small tests run
much, much faster. However, there are a couple of downsides:
First, test setup and teardown must be diligent about cleaning
up any data or schema elements that may interfere with other
tests.
Second, errors triggered by one test method may cascade
into other test failures. In an attempt to limit this, the
cluster will be restarted if a test fails or an exception is
caught. However, there may still be undetected problems in
Cassandra that cause cascading failures.
"""
test_path = None
cluster = None
cluster_options = None
@classmethod
def setUpClass(cls):
kill_windows_cassandra_procs()
maybe_cleanup_cluster_from_last_test_file()
cls.initialize_cluster()
def setUp(self):
self.set_current_tst_name()
self.connections = []
# TODO enable active log watching
# This needs to happen in setUp() and not setUpClass() so that individual
# test methods can set allow_log_errors and so that error handling
# only fails a single test method instead of the entire class.
# The problem with this is that ccm doesn't yet support stopping the
# active log watcher -- it runs until the cluster is destroyed. Since
# we reuse the same cluster, this doesn't work for us.
def tearDown(self):
# test_is_ending prevents active log watching from being able to interrupt the test
self.test_is_ending = True
failed = did_fail()
try:
if not self.allow_log_errors and self.check_logs_for_errors():
failed = True
raise AssertionError('Unexpected error in log, see stdout')
finally:
try:
# save the logs for inspection
if failed or KEEP_LOGS:
self.copy_logs(self.cluster)
except Exception as e:
print "Error saving log:", str(e)
finally:
reset_environment_vars()
if failed:
cleanup_cluster(self.cluster, self.test_path)
kill_windows_cassandra_procs()
self.initialize_cluster()
@classmethod
def initialize_cluster(cls):
"""
This method is responsible for initializing and configuring a ccm
cluster for the next set of tests. This can be called for two
different reasons:
* A class of tests is starting
* A test method failed/errored, so the cluster has been wiped
Subclasses that require custom initialization should generally
do so by overriding post_initialize_cluster().
"""
cls.test_path = get_test_path()
cls.cluster = create_ccm_cluster(cls.test_path, name='test')
        cls.init_config()
        maybe_setup_jacoco(cls.test_path)
write_last_test_file(cls.test_path, cls.cluster)
set_log_levels(cls.cluster)
cls.post_initialize_cluster()
@classmethod
def post_initialize_cluster(cls):
"""
This method is called after the ccm cluster has been created
and default config options have been applied. Any custom
initialization for a test class should generally be done
here in order to correctly handle cluster restarts after
test method failures.
"""
pass
@classmethod
def init_config(cls):
init_default_config(cls.cluster, cls.cluster_options)
def canReuseCluster(Tester):
orig_init = Tester.__init__
# make copy of original __init__, so we can call it without recursion
def __init__(self, *args, **kwargs):
self._preserve_cluster = REUSE_CLUSTER
orig_init(self, *args, **kwargs) # call the original __init__
Tester.__init__ = __init__ # set the class' __init__ to the new one
return Tester
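# Illustrative usage of the class decorator above (hypothetical test class):
#
#     @canReuseCluster
#     class MyUpgradeTest(Tester):
#         ...
#
# every instance then starts with _preserve_cluster mirroring REUSE_CLUSTER
# before the original __init__ runs.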
class freshCluster():
def __call__(self, f):
def wrapped(obj):
obj._preserve_cluster = False
obj.setUp()
f(obj)
wrapped.__name__ = f.__name__
wrapped.__doc__ = f.__doc__
return wrapped
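    # Illustrative usage (hypothetical test method): decorating with
    # freshCluster() marks a single test as needing a brand-new cluster,
    # even inside a cluster-reusing suite.
    #
    #     @freshCluster()
    #     def upgrade_test(self):
    #         ...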
class MultiError(Exception):
"""
Extends Exception to provide reporting multiple exceptions at once.
"""
def __init__(self, exceptions, tracebacks):
# an exception and the corresponding traceback should be found at the same
# position in their respective lists, otherwise __str__ will be incorrect
self.exceptions = exceptions
self.tracebacks = tracebacks
def __str__(self):
output = "\n****************************** BEGIN MultiError ******************************\n"
for (exc, tb) in zip(self.exceptions, self.tracebacks):
output += str(exc)
output += str(tb) + "\n"
output += "****************************** END MultiError ******************************"
return output
def run_scenarios(scenarios, handler, deferred_exceptions=tuple()):
"""
Runs multiple scenarios from within a single test method.
"Scenarios" are mini-tests where a common procedure can be reused with several different configurations.
They are intended for situations where complex/expensive setup isn't required and some shared state is acceptable (or trivial to reset).
Arguments: scenarios should be an iterable, handler should be a callable, and deferred_exceptions should be a tuple of exceptions which
are safe to delay until the scenarios are all run. For each item in scenarios, handler(item) will be called in turn.
Exceptions which occur will be bundled up and raised as a single MultiError exception, either when: a) all scenarios have run,
or b) on the first exception encountered which is not whitelisted in deferred_exceptions.
"""
errors = []
tracebacks = []
for i, scenario in enumerate(scenarios, 1):
debug("running scenario {}/{}: {}".format(i, len(scenarios), scenario))
try:
handler(scenario)
except deferred_exceptions as e:
            tracebacks.append(traceback.format_exc())
errors.append(type(e)('encountered {} {} running scenario:\n {}\n'.format(e.__class__.__name__, e.message, scenario)))
debug("scenario {}/{} encountered a deferrable exception, continuing".format(i, len(scenarios)))
except Exception as e:
# catch-all for any exceptions not intended to be deferred
            tracebacks.append(traceback.format_exc())
errors.append(type(e)('encountered {} {} running scenario:\n {}\n'.format(e.__class__.__name__, e.message, scenario)))
debug("scenario {}/{} encountered a non-deferrable exception, aborting".format(i, len(scenarios)))
raise MultiError(errors, tracebacks)
if errors:
raise MultiError(errors, tracebacks)
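# Illustrative usage sketch for run_scenarios (hypothetical names, not part of
# the original suite): each scenario is a plain value handed to the handler,
# and AssertionError failures are deferred until every scenario has run.
def _example_run_scenarios():
    def handler(strategy):
        assert strategy != 'BrokenStrategy', 'unexpected strategy'
    run_scenarios(['STCS', 'LCS', 'TWCS'], handler,
                  deferred_exceptions=(AssertionError,))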
|
{
"content_hash": "36766a678042092d1b94ff3ad95d2a6d",
"timestamp": "",
"source": "github",
"line_count": 1175,
"max_line_length": 148,
"avg_line_length": 38.733617021276594,
"alnum_prop": 0.6122561082791351,
"repo_name": "mambocab/cassandra-dtest",
"id": "dc5383be7cbb521faae15e181a8b7f78d08936d9",
"size": "45512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dtest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2158572"
},
{
"name": "Shell",
"bytes": "1999"
}
],
"symlink_target": ""
}
|
from flask import Flask
from flask.ext.testing import TestCase
from flask_nicely import nice_json
from flask_nicely.errors import NotFound
class TestFlaskNicely(TestCase):
def create_app(self):
app = Flask(__name__)
app.config['TESTING'] = True
return app
def test_success(self):
"""
Test that if the decorated function does not throw an exception,
a correctly-formed JSON response is returned containing data returned
by the function.
"""
data = {
"name": "Arthur, King of the Britons",
"quest": "To seek the Holy Grail",
"air-speed velocity of unladen swallow": "An African or a European swallow?",
}
@nice_json
def success_function():
return data
response = success_function()
self.assertEqual(
{'data': data, 'status': 200, 'error': None},
response.json)
self.assertEqual(200, response.status_code)
def test_404(self):
"""
Test that if the decorated function throws a NotFound error, a JSON
response of status 404 is returned with a generic error message.
"""
@nice_json
def error_function():
raise NotFound()
response = error_function()
self.assertEqual(
{'data': None, 'status': 404, 'error': "Not Found"},
response.json)
self.assertEqual(404, response.status_code)
def test_404_custom_message(self):
"""
Test that if the decorated function throws a NotFound error with a
specified message, a JSON response of status 404 is returned with
'error' set to the custom message.
"""
@nice_json
def error_function():
raise NotFound("Could not find the Grail!")
response = error_function()
self.assertEqual(
{'data': None, 'status': 404, 'error': "Could not find the Grail!"},
response.json)
self.assertEqual(404, response.status_code)
def test_404_custom_payload(self):
"""
Test that if the decorated function throws a NotFound error with an
additional payload, a JSON response of status 404 is returned with
extra keys from the payload included.
"""
test_payload = {
'error_detail': "The resource that you requested was not found on the server",
'documentation': "http://www.flask-nicely.readthedocs.org",
}
@nice_json
def error_function():
raise NotFound(payload=test_payload)
response = error_function()
self.assertEqual({
'data': None, 'status': 404, 'error': "Not Found",
'error_detail': "The resource that you requested was not found on the server",
'documentation': "http://www.flask-nicely.readthedocs.org",
},
response.json)
self.assertEqual(404, response.status_code)
def test_exception_debug(self):
"""
Test that if the decorated function throws an unspecified exception,
then the decorator will raise it if the app is in debug mode.
"""
self.app.config['DEBUG'] = True
@nice_json
def error_function():
raise Exception("I am an exception")
with self.assertRaises(Exception):
            error_function()
def test_exception_live(self):
"""
Test that if the decorated function throws an unspecified exception,
then the decorator will return a JSON response of status 500 if the app
is not in debug mode.
"""
self.app.config['DEBUG'] = False
@nice_json
def error_function():
raise Exception("I am an exception")
response = error_function()
self.assertEqual(
{'data': None, 'status': 500, 'error': "Internal Server Error"},
response.json)
self.assertEqual(500, response.status_code)
|
{
"content_hash": "953530ec99ca1bbe0a2ba7fa7d418fa9",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 90,
"avg_line_length": 29.33093525179856,
"alnum_prop": 0.58719646799117,
"repo_name": "Jwpe/Flask-Nicely",
"id": "a5e2aa8a88d4cb502cab2fab1a5a9dc2d7ce19a4",
"size": "4077",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_flask_nicely.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "19269"
},
{
"name": "Shell",
"bytes": "6713"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
import ray
from ray.experimental.serve import SingleQuery
from ray.experimental.serve.examples.adder import ScalerAdder, VectorizedAdder
from ray.experimental.serve.examples.counter import Counter, CustomCounter
from ray.experimental.serve.object_id import get_new_oid
INCREMENT = 3
@pytest.fixture(scope="module")
def ray_start():
ray.init(num_cpus=4)
yield
ray.shutdown()
@pytest.fixture
def generated_inputs():
deadline = 11111.11
inputs = []
input_arr = np.arange(10)
for i in input_arr:
oid = get_new_oid()
inputs.append(
SingleQuery(data=i, result_object_id=oid, deadline_s=deadline))
return inputs
def test_vadd(ray_start, generated_inputs):
adder = VectorizedAdder.remote(INCREMENT)
inputs = generated_inputs
oids = [inp.result_object_id for inp in inputs]
input_data = [inp.data for inp in inputs]
adder._dispatch.remote(inputs)
result_arr = np.array(ray.get(oids))
assert np.array_equal(result_arr, np.array(input_data) + INCREMENT)
def test_batched_input(ray_start, generated_inputs):
counter = Counter.remote()
counter._dispatch.remote(generated_inputs)
oids = [inp.result_object_id for inp in generated_inputs]
returned_query_ids = np.array(ray.get(oids))
assert np.array_equal(returned_query_ids, np.arange(1, 11))
def test_custom_method(ray_start, generated_inputs):
dummy = CustomCounter.remote()
dummy._dispatch.remote(generated_inputs)
oids = [inp.result_object_id for inp in generated_inputs]
returned_query_ids = np.array(ray.get(oids))
assert np.array_equal(returned_query_ids, np.ones(10))
def test_exception(ray_start):
adder = ScalerAdder.remote(INCREMENT)
query = SingleQuery("this can't be added with int", get_new_oid(), 10)
adder._dispatch.remote([query])
with pytest.raises(ray.worker.RayTaskError):
ray.get(query.result_object_id)
|
{
"content_hash": "08a6f500eba462cf49fa3ef598a53270",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 78,
"avg_line_length": 30.455882352941178,
"alnum_prop": 0.711733462095606,
"repo_name": "atumanov/ray",
"id": "3b2748b73bf3d150a8ed8b2b266524ba285e8c76",
"size": "2071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/experimental/serve/tests/test_actors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "20715"
},
{
"name": "C++",
"bytes": "1036803"
},
{
"name": "CSS",
"bytes": "9262"
},
{
"name": "Dockerfile",
"bytes": "3411"
},
{
"name": "HTML",
"bytes": "32704"
},
{
"name": "Java",
"bytes": "517715"
},
{
"name": "JavaScript",
"bytes": "8178"
},
{
"name": "Jupyter Notebook",
"bytes": "1610"
},
{
"name": "Python",
"bytes": "3081422"
},
{
"name": "Ruby",
"bytes": "956"
},
{
"name": "Shell",
"bytes": "76928"
},
{
"name": "Smarty",
"bytes": "955"
}
],
"symlink_target": ""
}
|
import logging
import json
from webspider.tasks.celery_app import celery_app
from webspider.controllers import keyword_statistic_ctl
from webspider.models import (KeywordModel, JobModel, JobKeywordModel, KeywordStatisticModel)
logger = logging.getLogger(__name__)
@celery_app.task()
def update_keywords_statistic_task():
"""更新关键词统计任务"""
keywords = KeywordModel.list()
for keyword in keywords:
update_single_keyword_statistic_task.delay(keyword.id)
@celery_app.task()
def update_single_keyword_statistic_task(keyword_id):
"""更新关键词统计任务"""
job_keywords = JobKeywordModel.list(filter_by={'keyword_id': keyword_id})
jobs = JobModel.list(filter=(JobModel.id.in_([job_keyword.job_id for job_keyword in job_keywords])))
if not jobs:
return
educations_statistic = keyword_statistic_ctl.get_educations_statistic(jobs=jobs)
finance_stage_statistic = keyword_statistic_ctl.get_finance_stage_statistic(jobs=jobs)
city_jobs_count_statistic = keyword_statistic_ctl.get_city_jobs_count_statistic(jobs=jobs)
salary_statistic = keyword_statistic_ctl.get_salary_statistic(jobs=jobs)
work_years_statistic = keyword_statistic_ctl.get_work_years_statistic(jobs=jobs)
statistic_values = dict(
keyword_id=keyword_id,
educations=json.dumps(educations_statistic),
city_jobs_count=json.dumps(city_jobs_count_statistic),
salary=json.dumps(salary_statistic),
financing_stage=json.dumps(finance_stage_statistic),
work_years=json.dumps(work_years_statistic)
)
if KeywordStatisticModel.is_exist(filter_by={'keyword_id': keyword_id}):
KeywordStatisticModel.update(filter_by={'keyword_id': keyword_id}, values=statistic_values)
else:
KeywordStatisticModel.add(**statistic_values)
|
{
"content_hash": "2599e38b79081c6e14d7ad7d2b1358c3",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 104,
"avg_line_length": 39.130434782608695,
"alnum_prop": 0.7316666666666667,
"repo_name": "GuozhuHe/webspider",
"id": "d6209ff3cf569c09b183f52fc736a90af13e6c9e",
"size": "1851",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webspider/tasks/actor/keyword_statistic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "599"
},
{
"name": "Python",
"bytes": "2509425"
},
{
"name": "TSQL",
"bytes": "9772"
}
],
"symlink_target": ""
}
|
import ctypes as ct
import sys
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
import os
# Setup for individual channels, these are default values
class channel_setup:
def __init__(self, channel):
# Set channel
self.channel = channel
# Set bit corresponding to channel in mask
self.coincidence_masking_expression = 2**channel
# Default values for all members
channel = 0
trig_level = 0
reset_hysteresis = 0
trigger_arm_hysteresis = 0
reset_arm_hysteresis = 0
# 1: Rising, 0: Falling
trigger_polarity = 1
reset_polarity = 0
coincidence_window_length = 1000
coincidence_masking_expression = 0
number_of_records = 1
record_variable_length = 1
nof_pretrigger_samples = 0
nof_moving_average_samples = 0
moving_average_delay = 0
samples_per_record = 1024
# = Record size if record_variable_length is 0
trailing_edge_window = samples_per_record
# Common setup for acquisition, these are default values
class acquisition_setup:
# Collect data from all four channels
channels_mask = 0b1111
# Define the record header struct
class HEADER(ct.Structure):
_fields_ = [("RecordStatus", ct.c_ubyte),
("UserID", ct.c_ubyte),
("Channel", ct.c_ubyte),
("DataFormat", ct.c_ubyte),
("SerialNumber", ct.c_uint32),
("RecordNumber", ct.c_uint32),
("SamplePeriod", ct.c_int32),
("Timestamp", ct.c_int64),
("RecordStart", ct.c_int64),
("RecordLength", ct.c_uint32),
("MovingAverage", ct.c_int16),
("GateCounter", ct.c_uint16)]
# This function loads the ADQAPI library using ctypes
def adqapi_load():
if os.name == 'nt':
ADQAPI = ct.cdll.LoadLibrary('ADQAPI.dll')
else:
ADQAPI = ct.cdll.LoadLibrary('libadq.so')
# Manually set return type from some ADQAPI functions
ADQAPI.CreateADQControlUnit.restype = ct.c_void_p
ADQAPI.ADQ_GetRevision.restype = ct.c_void_p
ADQAPI.ADQ_GetPtrStream.restype = ct.POINTER(ct.c_int16)
ADQAPI.ADQControlUnit_FindDevices.argtypes = [ct.c_void_p]
# Print ADQAPI revision
print('ADQAPI loaded, revision {:d}.'.format(ADQAPI.ADQAPI_GetRevision()))
return ADQAPI
# This function unloads the ADQAPI library using ctypes
def adqapi_unload(ADQAPI):
if os.name == 'nt':
# Unload DLL
ct.windll.kernel32.FreeLibrary(ADQAPI._handle)
# Convenience function when printing status from ADQAPI functions
def adq_status(status):
if (status==0):
return 'FAILURE'
else:
return 'OK'
# Print revision info for an ADQ device
def print_adq_device_revisions(ADQAPI, adq_cu, adq_num):
# Get revision info from ADQ
rev = ADQAPI.ADQ_GetRevision(adq_cu, adq_num)
revision = ct.cast(rev,ct.POINTER(ct.c_int))
print('\nConnected to ADQ #{:d}'.format(adq_num))
# Print revision information
print('FPGA Revision: {}'.format(revision[0]))
if (revision[1]):
print('Local copy')
else:
print('SVN Managed')
if (revision[2]):
print('Mixed Revision')
else :
print('SVN Updated')
print('')
# This function sets an alternating background color for a matplotlib plot
def alternate_background(ax, start_point, widths, labels=False,
color='#dddddd'):
ax.relim()
# update ax.viewLim using the new dataLim
ax.autoscale_view()
plt.draw()
# Calculate starting points
edges = start_point+np.cumsum(np.append([0],widths))
# Set plot x axis length
ax.set_xlim(start_point, edges[-1])
ylim=ax.get_ylim()
# Draw colored fields for every other width
for idx in range(1,len(edges)-1,2):
ax.add_patch(
patches.Rectangle(
(edges[idx], ylim[0]), # point(x,y)
widths[idx], # width
ylim[1]-ylim[0], # height
facecolor=color,
edgecolor='none',
zorder=-20
)
)
# Optionally draw labels
if labels==True:
for idx in range(0,len(edges)-1):
# Set y-position 1% under top
ypos=(ylim[1])-0.01*(ylim[1]-ylim[0])
# Enumerate fields
plt.text(edges[idx], ypos,
'R{}'.format(idx), verticalalignment='top')
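# Illustrative usage of alternate_background (hypothetical data): shade every
# other record region in a plot of four concatenated 100-sample records.
#
#     fig, ax = plt.subplots()
#     ax.plot(np.concatenate([np.sin(np.linspace(0, 7, 100))] * 4))
#     alternate_background(ax, start_point=0, widths=[100] * 4, labels=True)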
def collecting(channel_setup, records_completed):
state = False
for ch in range(len(channel_setup)):
state = state or (records_completed[ch] < channel_setup[ch].number_of_records)
return state
def print_event_counters(adqapi, adq_cu, adq_num):
status = ct.c_uint()
lt_tevent_ctr = ct.c_uint()
lt_revent_ctr = ct.c_uint()
ul_tevent_ctr = ct.c_uint()
ul_revent_ctr = ct.c_uint()
pt_tevent_ctr = ct.c_uint()
pt_revent_ctr = ct.c_uint()
acq_tevent_ctr = ct.c_uint()
acq_revent_ctr = ct.c_uint()
acq_revent_pt_ctr = ct.c_uint()
status = adqapi.ADQ_PDGetEventCounters(adq_cu, adq_num,
ct.byref(lt_tevent_ctr),
ct.byref(lt_revent_ctr),
ct.byref(ul_tevent_ctr),
ct.byref(ul_revent_ctr),
ct.byref(pt_tevent_ctr),
ct.byref(pt_revent_ctr),
ct.byref(acq_tevent_ctr),
ct.byref(acq_revent_ctr),
ct.byref(acq_revent_pt_ctr))
print('ADQAPI.ADQ_PDGetEventCounters returned {}'.format(adq_status(status)))
print('LT tevent ctr: {}'.format(lt_tevent_ctr.value))
print('LT revent ctr: {}'.format(lt_revent_ctr.value))
print('UL tevent ctr: {}'.format(ul_tevent_ctr.value))
print('UL revent ctr: {}'.format(ul_revent_ctr.value))
print('PT tevent ctr: {}'.format(pt_tevent_ctr.value))
print('PT revent ctr: {}'.format(pt_revent_ctr.value))
print('AQ tevent ctr: {}'.format(acq_tevent_ctr.value))
print('AQ revent ctr: {}'.format(acq_revent_ctr.value))
print('AQ revent pt ctr: {}'.format(acq_revent_pt_ctr.value))
return
|
{
"content_hash": "60d0bd90df5c814636c2a2b9a6d60af1",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 82,
"avg_line_length": 36.28651685393258,
"alnum_prop": 0.575940548072457,
"repo_name": "thomasbarillot/DAQ",
"id": "2d66700f1af5b621a5094cb4b8ca651d3a46463d",
"size": "6459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "HHGMonitor/ADQAPI_python/FWPD/modules/example_helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "113"
},
{
"name": "C",
"bytes": "582651"
},
{
"name": "C++",
"bytes": "836618"
},
{
"name": "Cuda",
"bytes": "15224"
},
{
"name": "HTML",
"bytes": "2750152"
},
{
"name": "MATLAB",
"bytes": "38740"
},
{
"name": "Python",
"bytes": "863343"
},
{
"name": "TeX",
"bytes": "888280"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from socket import timeout as SocketTimeout
from socket import error as SocketError
import time
import logging
try:
from .esthrift import Rest
from .esthrift.ttypes import Method, RestRequest
from thrift.transport import TTransport, TSocket, TSSLSocket
from thrift.protocol import TBinaryProtocol
from thrift.Thrift import TException
THRIFT_AVAILABLE = True
except ImportError:
THRIFT_AVAILABLE = False
from ..exceptions import ConnectionError, ImproperlyConfigured, ConnectionTimeout
from .pooling import PoolingConnection
logger = logging.getLogger('elasticsearch')
class ThriftConnection(PoolingConnection):
"""
This connection class is deprecated and may be removed in future versions.
Connection using the `thrift` protocol to communicate with elasticsearch.
See https://github.com/elasticsearch/elasticsearch-transport-thrift for additional info.
"""
transport_schema = 'thrift'
def __init__(self, host='localhost', port=9500, framed_transport=False, use_ssl=False, **kwargs):
"""
:arg framed_transport: use `TTransport.TFramedTransport` instead of
`TTransport.TBufferedTransport`
"""
if not THRIFT_AVAILABLE:
raise ImproperlyConfigured("Thrift is not available.")
super(ThriftConnection, self).__init__(host=host, port=port, **kwargs)
self._framed_transport = framed_transport
self._tsocket_class = TSocket.TSocket
if use_ssl:
self._tsocket_class = TSSLSocket.TSSLSocket
self._tsocket_args = (host, port)
def _make_connection(self):
socket = self._tsocket_class(*self._tsocket_args)
socket.setTimeout(self.timeout * 1000.0)
if self._framed_transport:
transport = TTransport.TFramedTransport(socket)
else:
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
client = Rest.Client(protocol)
client.transport = transport
transport.open()
return client
def perform_request(self, method, url, params=None, body=None, timeout=None, ignore=()):
request = RestRequest(method=Method._NAMES_TO_VALUES[method.upper()], uri=url,
parameters=params, body=body)
start = time.time()
tclient = None
try:
tclient = self._get_connection()
response = tclient.execute(request)
duration = time.time() - start
except SocketTimeout as e:
self.log_request_fail(method, url, body, time.time() - start, exception=e)
raise ConnectionTimeout('TIMEOUT', str(e), e)
except (TException, SocketError) as e:
self.log_request_fail(method, url, body, time.time() - start, exception=e)
if tclient:
try:
# try closing transport socket
tclient.transport.close()
except Exception as e:
logger.warning(
'Exception %s occured when closing a failed thrift connection.',
e, exc_info=True
)
raise ConnectionError('N/A', str(e), e)
self._release_connection(tclient)
if not (200 <= response.status < 300) and response.status not in ignore:
self.log_request_fail(method, url, body, duration, response.status)
self._raise_error(response.status, response.body)
self.log_request_success(method, url, url, body, response.status,
response.body, duration)
headers = {}
if response.headers:
headers = dict((k.lower(), v) for k, v in response.headers.items())
return response.status, headers, response.body or ''
|
{
"content_hash": "ae1cecf1e98f6f215ff7a611798fa9c3",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 101,
"avg_line_length": 38.336633663366335,
"alnum_prop": 0.6397210743801653,
"repo_name": "liuyi1112/elasticsearch-py",
"id": "172868c8d1c518bcf991cf785f7150633e3f2866",
"size": "3872",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "elasticsearch/connection/thrift.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "297805"
}
],
"symlink_target": ""
}
|
"""
A module for the binomial distribution node
"""
import numpy as np
import scipy.special as special
from .expfamily import (ExponentialFamily,
ExponentialFamilyDistribution,
useconstructor)
from .beta import BetaMoments
from .poisson import PoissonMoments
from .node import (Moments,
ensureparents)
from bayespy.utils import misc, random
class BinomialMoments(PoissonMoments):
"""
Class for the moments of binomial variables
"""
def __init__(self, N):
self.N = N
super().__init__()
def compute_fixed_moments(self, x):
"""
Compute the moments for a fixed value
"""
# Make sure the values are integers in valid range
x = np.asanyarray(x)
if np.any(x > self.N):
raise ValueError("Invalid count")
        return super().compute_fixed_moments(x)
def compute_dims_from_values(self, x):
"""
Return the shape of the moments for a fixed value.
The realizations are scalars, thus the shape of the moment is ().
"""
raise DeprecationWarning()
return super().compute_dims_from_values()
class BinomialDistribution(ExponentialFamilyDistribution):
"""
Class for the VMP formulas of binomial variables.
"""
def __init__(self, N):
N = np.asanyarray(N)
if not misc.isinteger(N):
raise ValueError("Number of trials must be integer")
if np.any(N < 0):
raise ValueError("Number of trials must be non-negative")
self.N = np.asanyarray(N)
super().__init__()
def compute_message_to_parent(self, parent, index, u_self, u_p):
"""
Compute the message to a parent node.
"""
if index == 0:
x = u_self[0][...,None]
n = self.N[...,None]
m0 = x*[1, -1] + n*[0, 1]
m = [m0]
return m
else:
raise ValueError("Incorrect parent index")
def compute_phi_from_parents(self, u_p, mask=True):
"""
Compute the natural parameter vector given parent moments.
"""
logp0 = u_p[0][...,0]
logp1 = u_p[0][...,1]
phi0 = logp0 - logp1
return [phi0]
def compute_moments_and_cgf(self, phi, mask=True):
"""
Compute the moments and :math:`g(\phi)`.
"""
u0 = self.N / (1 + np.exp(-phi[0]))
g = -self.N * np.log1p(np.exp(phi[0]))
return ( [u0], g )
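    # Derivation sketch: the natural parameter is the log-odds
    # phi = log(p / (1 - p)), so the mean is E[x] = N * sigmoid(phi)
    # = N / (1 + exp(-phi)) and g(phi) = N * log(1 - p)
    # = -N * log(1 + exp(phi)), matching the two lines above.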
def compute_cgf_from_parents(self, u_p):
"""
Compute :math:`\mathrm{E}_{q(p)}[g(p)]`
"""
logp0 = u_p[0][...,0]
logp1 = u_p[0][...,1]
return self.N * logp1
def compute_fixed_moments_and_f(self, x, mask=True):
"""
Compute the moments and :math:`f(x)` for a fixed value.
"""
# Make sure the values are integers in valid range
x = np.asanyarray(x)
if not misc.isinteger(x):
raise ValueError("Counts must be integer")
if np.any(x < 0) or np.any(x > self.N):
raise ValueError("Invalid count")
# Now, the moments are just the counts
u = [x]
f = (special.gammaln(self.N+1) -
special.gammaln(x+1) -
special.gammaln(self.N-x+1))
return (u, f)
def random(self, *phi, plates=None):
"""
Draw a random sample from the distribution.
"""
p = random.logodds_to_probability(phi[0])
return np.random.binomial(self.N, p, size=plates)
class Binomial(ExponentialFamily):
r"""
Node for binomial random variables.
The node models the number of successes :math:`x \in \{0, \ldots, n\}` in
:math:`n` trials with probability :math:`p` for success:
.. math::
x \sim \mathrm{Binomial}(n, p).
Parameters
----------
n : scalar or array
Number of trials
p : beta-like node or scalar or array
Probability of a success in a trial
Examples
--------
>>> import warnings
>>> warnings.filterwarnings('ignore', category=RuntimeWarning)
>>> from bayespy.nodes import Binomial, Beta
>>> p = Beta([1e-3, 1e-3])
>>> x = Binomial(10, p)
>>> x.observe(7)
>>> p.update()
>>> import bayespy.plot as bpplt
>>> import numpy as np
>>> bpplt.pdf(p, np.linspace(0, 1, num=100))
[<matplotlib.lines.Line2D object at 0x...>]
See also
--------
Bernoulli, Multinomial, Beta
"""
def __init__(self, n, p, **kwargs):
"""
Create binomial node
"""
super().__init__(n, p, **kwargs)
@classmethod
def _constructor(cls, n, p, **kwargs):
"""
Constructs distribution and moments objects.
"""
p = cls._ensure_moments(p, BetaMoments)
parents = [p]
moments = BinomialMoments(n)
parent_moments = (p._moments,)
distribution = BinomialDistribution(n)
return ( parents,
kwargs,
( (), ),
cls._total_plates(kwargs.get('plates'),
distribution.plates_from_parent(0, p.plates),
np.shape(n)),
distribution,
moments,
parent_moments)
def __str__(self):
"""
Print the distribution using standard parameterization.
"""
p = 1 / (1 + np.exp(-self.phi[0]))
n = self._distribution.N
return ("%s ~ Binomial(n, p)\n"
" n = \n"
"%s\n"
" p = \n"
"%s\n"
% (self.name, n, p))
|
{
"content_hash": "34ab9e89df683935b7dc6892a2aeff9d",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 80,
"avg_line_length": 26.190045248868778,
"alnum_prop": 0.5171043538355218,
"repo_name": "dungvtdev/upsbayescpm",
"id": "ae0fa2fe311b9f8ef133ac7b445957c821585801",
"size": "6038",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bayespy/inference/vmp/nodes/binomial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "902"
},
{
"name": "Python",
"bytes": "2515965"
},
{
"name": "Shell",
"bytes": "66"
}
],
"symlink_target": ""
}
|
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "roomba"
PROJECT_SPACE_DIR = "/home/joemelt101/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
|
{
"content_hash": "26f0ae463f09d059b889f54f521f3134",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 68,
"avg_line_length": 45.285714285714285,
"alnum_prop": 0.6340694006309149,
"repo_name": "joemelt101/BIR_Labs",
"id": "a7db23b5dddf388dc2b251d3667be7f1dbb09abd",
"size": "374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/roomba/catkin_generated/pkg.installspace.context.pc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11935"
},
{
"name": "C++",
"bytes": "56666"
},
{
"name": "CMake",
"bytes": "339665"
},
{
"name": "Common Lisp",
"bytes": "35106"
},
{
"name": "Makefile",
"bytes": "609904"
},
{
"name": "Python",
"bytes": "83246"
},
{
"name": "Shell",
"bytes": "8036"
},
{
"name": "SourcePawn",
"bytes": "414"
}
],
"symlink_target": ""
}
|
"""
Release script.
"""
import argparse
import sys
from subprocess import check_call
from colorama import init, Fore
from git import Repo, Remote
def create_branch(version):
"""Create a fresh branch from upstream/main"""
repo = Repo.init(".")
if repo.is_dirty(untracked_files=True):
raise RuntimeError("Repository is dirty, please commit/stash your changes.")
branch_name = f"release-{version}"
print(f"{Fore.CYAN}Create {branch_name} branch from upstream main")
upstream = get_upstream(repo)
upstream.fetch()
release_branch = repo.create_head(branch_name, upstream.refs.main, force=True)
release_branch.checkout()
return repo
def get_upstream(repo: Repo) -> Remote:
"""Find upstream repository for pluggy on the remotes"""
for remote in repo.remotes:
for url in remote.urls:
if url.endswith(("pytest-dev/pluggy.git", "pytest-dev/pluggy")):
return remote
raise RuntimeError("could not find pytest-dev/pluggy remote")
def pre_release(version):
"""Generates new docs, release announcements and creates a local tag."""
create_branch(version)
changelog(version, write_out=True)
check_call(["git", "commit", "-a", "-m", f"Preparing release {version}"])
print()
print(f"{Fore.GREEN}Please push your branch to your fork and open a PR.")
def changelog(version, write_out=False):
if write_out:
addopts = []
else:
addopts = ["--draft"]
print(f"{Fore.CYAN}Generating CHANGELOG")
check_call(["towncrier", "--yes", "--version", version] + addopts)
def main():
init(autoreset=True)
parser = argparse.ArgumentParser()
parser.add_argument("version", help="Release version")
options = parser.parse_args()
try:
pre_release(options.version)
except RuntimeError as e:
print(f"{Fore.RED}ERROR: {e}")
return 1
if __name__ == "__main__":
sys.exit(main())
|
{
"content_hash": "e6a33f4ad6bbd10f8ada6e3274be7913",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 84,
"avg_line_length": 28.28985507246377,
"alnum_prop": 0.6542008196721312,
"repo_name": "hpk42/pluggy",
"id": "e09b8c77b1163d82b4169104dc5f608731559849",
"size": "1952",
"binary": false,
"copies": "7",
"ref": "refs/heads/main",
"path": "scripts/release.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60108"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
from contextlib import contextmanager
from pants.cache.artifact import TarballArtifact
from pants.cache.artifact_cache import ArtifactCache, UnreadableArtifact
from pants.util.contextutil import temporary_file
from pants.util.dirutil import (safe_delete, safe_mkdir, safe_mkdir_for,
safe_rm_oldest_items_in_dir, safe_rmtree)
logger = logging.getLogger(__name__)
class BaseLocalArtifactCache(ArtifactCache):
def __init__(self, artifact_root, compression, permissions=None):
"""
:param str artifact_root: The path under which cacheable products will be read/written.
:param int compression: The gzip compression level for created artifacts.
Valid values are 0-9.
:param string permissions: File permissions to use when creating artifact files.
"""
super(BaseLocalArtifactCache, self).__init__(artifact_root)
self._compression = compression
self._cache_root = None
self._permissions = permissions
def _artifact(self, path):
return TarballArtifact(self.artifact_root, path, self._compression)
@contextmanager
def _tmpfile(self, cache_key, use):
"""Allocate tempfile on same device as cache with a suffix chosen to prevent collisions"""
with temporary_file(suffix=cache_key.id + use, root_dir=self._cache_root,
permissions=self._permissions) as tmpfile:
yield tmpfile
@contextmanager
def insert_paths(self, cache_key, paths):
"""Gather paths into artifact, store it, and yield the path to stored artifact tarball."""
with self._tmpfile(cache_key, 'write') as tmp:
self._artifact(tmp.name).collect(paths)
yield self._store_tarball(cache_key, tmp.name)
def store_and_use_artifact(self, cache_key, src, results_dir=None):
"""Read the content of a tarball from an iterator and return an artifact stored in the cache."""
with self._tmpfile(cache_key, 'read') as tmp:
for chunk in src:
tmp.write(chunk)
tmp.close()
tarball = self._store_tarball(cache_key, tmp.name)
artifact = self._artifact(tarball)
if results_dir is not None:
safe_rmtree(results_dir)
artifact.extract()
return True
def _store_tarball(self, cache_key, src):
"""Given a src path to an artifact tarball, store it and return stored artifact's path."""
pass
class LocalArtifactCache(BaseLocalArtifactCache):
"""An artifact cache that stores the artifacts in local files."""
def __init__(self, artifact_root, cache_root, compression, max_entries_per_target=None,
permissions=None):
"""
:param str artifact_root: The path under which cacheable products will be read/written.
:param str cache_root: The locally cached files are stored under this directory.
:param int compression: The gzip compression level for created artifacts (1-9 or false-y).
:param int max_entries_per_target: The maximum number of old cache files to leave behind on a cache miss.
:param str permissions: File permissions to use when creating artifact files.
"""
super(LocalArtifactCache, self).__init__(
artifact_root,
compression,
permissions=int(permissions.strip(), base=8) if permissions else None,
)
self._cache_root = os.path.realpath(os.path.expanduser(cache_root))
self._max_entries_per_target = max_entries_per_target
safe_mkdir(self._cache_root)
def prune(self, root):
"""Prune stale cache files
If the option --cache-target-max-entry is greater than zero, then prune will remove all but n
old cache files for each target/task.
:param str root: The path under which cacheable artifacts will be cleaned
"""
max_entries_per_target = self._max_entries_per_target
if os.path.isdir(root) and max_entries_per_target:
safe_rm_oldest_items_in_dir(root, max_entries_per_target)
def has(self, cache_key):
return self._artifact_for(cache_key).exists()
def _artifact_for(self, cache_key):
return self._artifact(self._cache_file_for_key(cache_key))
def use_cached_files(self, cache_key, results_dir=None):
tarfile = self._cache_file_for_key(cache_key)
try:
artifact = self._artifact_for(cache_key)
if artifact.exists():
if results_dir is not None:
safe_rmtree(results_dir)
artifact.extract()
return True
except Exception as e:
# TODO(davidt): Consider being more granular in what is caught.
logger.warn('Error while reading {0} from local artifact cache: {1}'.format(tarfile, e))
safe_delete(tarfile)
return UnreadableArtifact(cache_key, e)
return False
def try_insert(self, cache_key, paths):
with self.insert_paths(cache_key, paths):
pass
def delete(self, cache_key):
safe_delete(self._cache_file_for_key(cache_key))
def _store_tarball(self, cache_key, src):
dest = self._cache_file_for_key(cache_key)
safe_mkdir_for(dest)
os.rename(src, dest)
if self._permissions:
os.chmod(dest, self._permissions)
self.prune(os.path.dirname(dest)) # Remove old cache files.
return dest
def _cache_file_for_key(self, cache_key):
# Note: it's important to use the id as well as the hash, because two different targets
# may have the same hash if both have no sources, but we may still want to differentiate them.
return os.path.join(self._cache_root, cache_key.id, cache_key.hash) + '.tgz'
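    # Illustrative result (hypothetical key): a cache key with id 'mytarget'
    # and hash 'deadbeef' maps to <cache_root>/mytarget/deadbeef.tgz.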
class TempLocalArtifactCache(BaseLocalArtifactCache):
"""A local cache that does not actually store any files between calls.
This implementation does not have a backing _cache_root, and never
actually stores files between calls, but is useful for handling file IO for a remote cache.
"""
def __init__(self, artifact_root, compression, permissions=None):
"""
:param str artifact_root: The path under which cacheable products will be read/written.
"""
super(TempLocalArtifactCache, self).__init__(artifact_root, compression=compression,
permissions=permissions)
def _store_tarball(self, cache_key, src):
return src
def has(self, cache_key):
return False
def use_cached_files(self, cache_key, results_dir=None):
return False
def delete(self, cache_key):
pass
|
{
"content_hash": "8765fe04d2dbf1759584b1fbf726c262",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 109,
"avg_line_length": 37.79651162790697,
"alnum_prop": 0.6892785725273035,
"repo_name": "ity/pants",
"id": "956645df8bb730dfada2109ec497e69562aaa68c",
"size": "6648",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/python/pants/cache/local_artifact_cache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "1526"
},
{
"name": "HTML",
"bytes": "75140"
},
{
"name": "Java",
"bytes": "402667"
},
{
"name": "JavaScript",
"bytes": "29992"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "4960888"
},
{
"name": "Scala",
"bytes": "85556"
},
{
"name": "Shell",
"bytes": "58420"
},
{
"name": "Thrift",
"bytes": "2919"
}
],
"symlink_target": ""
}
|
"""
>>> from opaque_ext import *
Check for correct conversion
>>> use(get())
Check that None is converted to a NULL opaque pointer
>>> useany(get())
1
>>> useany(None)
0
Check that we don't lose type information by converting NULL
opaque pointers to None
>>> assert getnull() is None
>>> useany(getnull())
0
>>> failuse(get())
Traceback (most recent call last):
...
RuntimeError: success
Check that there is no conversion from integers ...
>>> try: use(0)
... except TypeError: pass
... else: print('expected a TypeError')
... and from strings to opaque objects
>>> try: use("")
... except TypeError: pass
... else: print('expected a TypeError')
Now check the same for another opaque pointer type
>>> use2(get2())
>>> failuse2(get2())
Traceback (most recent call last):
...
RuntimeError: success
>>> try: use2(0)
... except TypeError: pass
... else: print('expected a TypeError')
>>> try: use2("")
... except TypeError: pass
... else: print('expected a TypeError')
Check that opaque types are distinct
>>> try: use(get2())
... except TypeError: pass
... else: print('expected a TypeError')
>>> try: use2(get())
... except TypeError: pass
... else: print('expected a TypeError')
This used to result in a segmentation violation
>>> type(get()) != type (get2())
1
"""
def run(args=None):
import sys
import doctest
if args is not None:
sys.argv = args
return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
print("running...")
import sys
status = run()[0]
if status == 0: print("Done.")
sys.exit(status)
|
{
"content_hash": "defc1780eb9f0c343d36b92e8b4c0fe8",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 64,
"avg_line_length": 20.51851851851852,
"alnum_prop": 0.6215403128760529,
"repo_name": "hkernbach/arangodb",
"id": "311b1893a509f1427bfc47ebf9710ebda2917aff",
"size": "1885",
"binary": false,
"copies": "9",
"ref": "refs/heads/devel",
"path": "3rdParty/boost/1.62.0/libs/python/test/opaque.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "89079"
},
{
"name": "Assembly",
"bytes": "391227"
},
{
"name": "Awk",
"bytes": "7502"
},
{
"name": "Batchfile",
"bytes": "62496"
},
{
"name": "C",
"bytes": "9184899"
},
{
"name": "C#",
"bytes": "96431"
},
{
"name": "C++",
"bytes": "278343201"
},
{
"name": "CMake",
"bytes": "664691"
},
{
"name": "CSS",
"bytes": "650173"
},
{
"name": "CWeb",
"bytes": "174166"
},
{
"name": "Cuda",
"bytes": "52444"
},
{
"name": "DIGITAL Command Language",
"bytes": "259402"
},
{
"name": "Emacs Lisp",
"bytes": "14637"
},
{
"name": "Fortran",
"bytes": "1856"
},
{
"name": "Groovy",
"bytes": "51836"
},
{
"name": "HTML",
"bytes": "2415724"
},
{
"name": "Java",
"bytes": "1048556"
},
{
"name": "JavaScript",
"bytes": "54219725"
},
{
"name": "LLVM",
"bytes": "24019"
},
{
"name": "Lex",
"bytes": "1231"
},
{
"name": "Lua",
"bytes": "17899"
},
{
"name": "M4",
"bytes": "658700"
},
{
"name": "Makefile",
"bytes": "522586"
},
{
"name": "Max",
"bytes": "36857"
},
{
"name": "Module Management System",
"bytes": "1545"
},
{
"name": "NSIS",
"bytes": "42998"
},
{
"name": "Objective-C",
"bytes": "98866"
},
{
"name": "Objective-C++",
"bytes": "2503"
},
{
"name": "PHP",
"bytes": "118092"
},
{
"name": "Pascal",
"bytes": "150599"
},
{
"name": "Perl",
"bytes": "906737"
},
{
"name": "Perl 6",
"bytes": "25883"
},
{
"name": "PowerShell",
"bytes": "20434"
},
{
"name": "Python",
"bytes": "4557865"
},
{
"name": "QMake",
"bytes": "16692"
},
{
"name": "R",
"bytes": "5123"
},
{
"name": "Rebol",
"bytes": "354"
},
{
"name": "Roff",
"bytes": "1089418"
},
{
"name": "Ruby",
"bytes": "1141022"
},
{
"name": "SAS",
"bytes": "1847"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "508528"
},
{
"name": "Swift",
"bytes": "116"
},
{
"name": "Tcl",
"bytes": "1172"
},
{
"name": "TeX",
"bytes": "32117"
},
{
"name": "Visual Basic",
"bytes": "11568"
},
{
"name": "XSLT",
"bytes": "567028"
},
{
"name": "Yacc",
"bytes": "53063"
}
],
"symlink_target": ""
}
|
from candidator.adapters import CandidatorCalculator
from candidator.models import Position
class YQSCalculator(CandidatorCalculator):
final_results_key = 'puntos'
order_reversed = False
def determine_match(self, person_position, external_position):
try:
diff = external_position.answervalue.value - person_position.answervalue.value
except Position.answervalue.RelatedObjectDoesNotExist:
diff = 0
return {"difference": abs(diff)}
def determine_points_per_person_per_category(self, explanation):
points = 0
for t in explanation:
if explanation[t]["difference"]:
points += explanation[t]["difference"]
return points
def determine_total_result_per_person(self, points_per_person, total_comparisons):
return {self.final_results_key: points_per_person}
def determine_not_match(self):
return {"difference": None}
|
{
"content_hash": "6dfc565039e94a3be68f3ccc903c59c1",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 90,
"avg_line_length": 38.2,
"alnum_prop": 0.680628272251309,
"repo_name": "yank07/votai-theme",
"id": "9ca2fb31e8d4ab024dcc183b9fc659f1a02191be",
"size": "955",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "votai_theme/calculator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "54859"
},
{
"name": "HTML",
"bytes": "114382"
},
{
"name": "JavaScript",
"bytes": "71340"
},
{
"name": "Makefile",
"bytes": "1243"
},
{
"name": "Python",
"bytes": "17058"
}
],
"symlink_target": ""
}
|
"""Test the importmulti RPC.
Test importmulti by generating keys on node0, importing the scriptPubKeys and
addresses on node1 and then testing the address info for the different address
variants.
- `get_key()` and `get_multisig()` are called to generate keys on node0 and
return the privkeys, pubkeys and all variants of scriptPubKey and address.
- `test_importmulti()` is called to send an importmulti call to node1, test
success, and (if unsuccessful) test the error code and error message returned.
- `test_address()` is called to call getaddressinfo for an address on node1
and test the values returned."""
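# For orientation, a minimal importmulti request (values hypothetical) looks like:
# {"scriptPubKey": {"address": <p2pkh address>}, "timestamp": "now"}
# Optional fields exercised below include "pubkeys", "keys", "redeemscript",
# "witnessscript", "internal", "watchonly", "label", "desc" and "range".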
from test_framework.script import (
CScript,
OP_NOP,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.descriptors import descsum_create
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
)
from test_framework.wallet_util import (
get_key,
get_multisig,
test_address,
)
class ImportMultiTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-addresstype=legacy"], ["-addresstype=legacy"]]
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
def test_importmulti(self, req, success, error_code=None, error_message=None, warnings=None):
"""Run importmulti and assert success"""
if warnings is None:
warnings = []
result = self.nodes[1].importmulti([req])
observed_warnings = []
if 'warnings' in result[0]:
observed_warnings = result[0]['warnings']
assert_equal("\n".join(sorted(warnings)), "\n".join(sorted(observed_warnings)))
assert_equal(result[0]['success'], success)
if error_code is not None:
assert_equal(result[0]['error']['code'], error_code)
assert_equal(result[0]['error']['message'], error_message)
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue() # Sync the timestamp to the wallet, so that importmulti works
node0_address1 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
# Check only one address
assert_equal(node0_address1['ismine'], True)
# Node 1 sync test
assert_equal(self.nodes[1].getblockcount(), 1)
# Address Test - before import
address_info = self.nodes[1].getaddressinfo(node0_address1['address'])
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
# RPC importmulti -----------------------------------------------
# Bitcoin Address (implicit non-internal)
self.log.info("Should import an address")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp,
ischange=False)
watchonly_address = key.p2pkh_addr
watchonly_timestamp = timestamp
self.log.info("Should not import an invalid address")
self.test_importmulti({"scriptPubKey": {"address": "not valid address"},
"timestamp": "now"},
success=False,
error_code=-5,
error_message='Invalid address \"not valid address\"')
# ScriptPubKey + internal
self.log.info("Should import a scriptPubKey with internal flag")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"internal": True},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp,
ischange=True)
# ScriptPubKey + internal + label
self.log.info("Should not allow a label to be specified when internal is true")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"internal": True,
"label": "Unsuccessful labelling for internal addresses"},
success=False,
error_code=-8,
error_message='Internal addresses should not have a label')
# Nonstandard scriptPubKey + !internal
self.log.info("Should not import a nonstandard scriptPubKey without internal flag")
nonstandardScriptPubKey = key.p2pkh_script + CScript([OP_NOP]).hex()
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
"timestamp": "now"},
success=False,
error_code=-8,
error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=False,
timestamp=None)
# Address + Public key + !Internal(explicit)
self.log.info("Should import an address with public key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"pubkeys": [key.pubkey],
"internal": False},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp)
# ScriptPubKey + Public key + internal
self.log.info("Should import a scriptPubKey with internal and with public key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"pubkeys": [key.pubkey],
"internal": True},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp)
# Nonstandard scriptPubKey + Public key + !internal
self.log.info("Should not import a nonstandard scriptPubKey without internal and with public key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
"timestamp": "now",
"pubkeys": [key.pubkey]},
success=False,
error_code=-8,
error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=False,
timestamp=None)
# Address + Private key + !watchonly
self.log.info("Should import an address with private key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [key.privkey]},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=True,
timestamp=timestamp)
self.log.info("Should not import an address with private key if is already imported")
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [key.privkey]},
success=False,
error_code=-4,
error_message='The wallet already contains the private key for this address or script ("' + key.p2pkh_script + '")')
# Address + Private key + watchonly
self.log.info("Should import an address with private key and with watchonly")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [key.privkey],
"watchonly": True},
success=True,
warnings=["All private keys are provided, outputs will be considered spendable. If this is intentional, do not specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=True,
timestamp=timestamp)
# ScriptPubKey + Private key + internal
self.log.info("Should import a scriptPubKey with internal and with private key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"keys": [key.privkey],
"internal": True},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=True,
timestamp=timestamp)
# Nonstandard scriptPubKey + Private key + !internal
self.log.info("Should not import a nonstandard scriptPubKey without internal and with private key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
"timestamp": "now",
"keys": [key.privkey]},
success=False,
error_code=-8,
error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=False,
timestamp=None)
# P2SH address
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue()
self.log.info("Should import a p2sh")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
multisig.p2sh_addr,
isscript=True,
iswatchonly=True,
timestamp=timestamp)
p2shunspent = self.nodes[1].listunspent(0, 999999, [multisig.p2sh_addr])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], False)
# P2SH + Redeem script
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue()
self.log.info("Should import a p2sh with respective redeem script")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now",
"redeemscript": multisig.redeem_script},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
multisig.p2sh_addr, timestamp=timestamp, iswatchonly=True, ismine=False, solvable=True)
p2shunspent = self.nodes[1].listunspent(0, 999999, [multisig.p2sh_addr])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + !Watchonly
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue()
self.log.info("Should import a p2sh with respective redeem script and private keys")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now",
"redeemscript": multisig.redeem_script,
"keys": multisig.privkeys[0:2]},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
multisig.p2sh_addr,
timestamp=timestamp,
ismine=False,
iswatchonly=True,
solvable=True)
p2shunspent = self.nodes[1].listunspent(0, 999999, [multisig.p2sh_addr])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + Watchonly
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue()
self.log.info("Should import a p2sh with respective redeem script and private keys")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now",
"redeemscript": multisig.redeem_script,
"keys": multisig.privkeys[0:2],
"watchonly": True},
success=True)
test_address(self.nodes[1],
multisig.p2sh_addr,
iswatchonly=True,
ismine=False,
solvable=True,
timestamp=timestamp)
# Address + Public key + !Internal + Wrong pubkey
self.log.info("Should not import an address with the wrong public key as non-solvable")
key = get_key(self.nodes[0])
wrong_key = get_key(self.nodes[0]).pubkey
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"pubkeys": [wrong_key]},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# ScriptPubKey + Public key + internal + Wrong pubkey
self.log.info("Should import a scriptPubKey with internal and with a wrong public key as non-solvable")
key = get_key(self.nodes[0])
wrong_key = get_key(self.nodes[0]).pubkey
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"pubkeys": [wrong_key],
"internal": True},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# Address + Private key + !watchonly + Wrong private key
self.log.info("Should import an address with a wrong private key as non-solvable")
key = get_key(self.nodes[0])
wrong_privkey = get_key(self.nodes[0]).privkey
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [wrong_privkey]},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# ScriptPubKey + Private key + internal + Wrong private key
self.log.info("Should import a scriptPubKey with internal and with a wrong private key as non-solvable")
key = get_key(self.nodes[0])
wrong_privkey = get_key(self.nodes[0]).privkey
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"keys": [wrong_privkey],
"internal": True},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# Importing existing watch only address with new timestamp should replace saved timestamp.
assert_greater_than(timestamp, watchonly_timestamp)
self.log.info("Should replace previously saved watch only timestamp.")
self.test_importmulti({"scriptPubKey": {"address": watchonly_address},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
watchonly_address,
iswatchonly=True,
ismine=False,
timestamp=timestamp)
watchonly_timestamp = timestamp
# restart nodes to check for proper serialization/deserialization of watch only address
self.stop_nodes()
self.start_nodes()
test_address(self.nodes[1],
watchonly_address,
iswatchonly=True,
ismine=False,
timestamp=watchonly_timestamp)
# Bad or missing timestamps
self.log.info("Should throw on invalid or missing timestamp values")
assert_raises_rpc_error(-3, 'Missing required timestamp field for key',
self.nodes[1].importmulti, [{"scriptPubKey": key.p2pkh_script}])
assert_raises_rpc_error(-3, 'Expected number or "now" timestamp value for key. got type string',
self.nodes[1].importmulti, [{
"scriptPubKey": key.p2pkh_script,
"timestamp": ""
}])
# Import P2WPKH address as watch only
self.log.info("Should import a P2WPKH address as watch only")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2wpkh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
key.p2wpkh_addr,
iswatchonly=True,
solvable=False)
# Import P2WPKH address with public key but no private key
self.log.info("Should import a P2WPKH address and public key as solvable but not spendable")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2wpkh_addr},
"timestamp": "now",
"pubkeys": [key.pubkey]},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2wpkh_addr,
ismine=False,
solvable=True)
# Import P2WPKH address with key and check it is spendable
self.log.info("Should import a P2WPKH address with key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2wpkh_addr},
"timestamp": "now",
"keys": [key.privkey]},
success=True)
test_address(self.nodes[1],
key.p2wpkh_addr,
iswatchonly=False,
ismine=True)
# P2WSH multisig address without scripts or keys
multisig = get_multisig(self.nodes[0])
self.log.info("Should import a p2wsh multisig as watch only without respective redeem script and private keys")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2wsh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
multisig.p2sh_addr,
solvable=False)
# Same P2WSH multisig address as above, but now with witnessscript + private keys
self.log.info("Should import a p2wsh with respective witness script and private keys")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2wsh_addr},
"timestamp": "now",
"witnessscript": multisig.redeem_script,
"keys": multisig.privkeys},
success=True)
test_address(self.nodes[1],
multisig.p2sh_addr,
solvable=True,
ismine=True,
sigsrequired=2)
# P2SH-P2WPKH address with no redeemscript or public or private key
key = get_key(self.nodes[0])
self.log.info("Should import a p2sh-p2wpkh without redeem script or keys")
self.test_importmulti({"scriptPubKey": {"address": key.p2sh_p2wpkh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
key.p2sh_p2wpkh_addr,
solvable=False,
ismine=False)
# P2SH-P2WPKH address + redeemscript + public key with no private key
self.log.info("Should import a p2sh-p2wpkh with respective redeem script and pubkey as solvable")
self.test_importmulti({"scriptPubKey": {"address": key.p2sh_p2wpkh_addr},
"timestamp": "now",
"redeemscript": key.p2sh_p2wpkh_redeem_script,
"pubkeys": [key.pubkey]},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2sh_p2wpkh_addr,
solvable=True,
ismine=False)
# P2SH-P2WPKH address + redeemscript + private key
key = get_key(self.nodes[0])
self.log.info("Should import a p2sh-p2wpkh with respective redeem script and private keys")
self.test_importmulti({"scriptPubKey": {"address": key.p2sh_p2wpkh_addr},
"timestamp": "now",
"redeemscript": key.p2sh_p2wpkh_redeem_script,
"keys": [key.privkey]},
success=True)
test_address(self.nodes[1],
key.p2sh_p2wpkh_addr,
solvable=True,
ismine=True)
# P2SH-P2WSH multisig + redeemscript with no private key
multisig = get_multisig(self.nodes[0])
self.log.info("Should import a p2sh-p2wsh with respective redeem script but no private key")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_p2wsh_addr},
"timestamp": "now",
"redeemscript": multisig.p2wsh_script,
"witnessscript": multisig.redeem_script},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
multisig.p2sh_p2wsh_addr,
solvable=True,
ismine=False)
# Test importing of a P2SH-P2WPKH address via descriptor + private key
key = get_key(self.nodes[0])
self.log.info("Should not import a p2sh-p2wpkh address from descriptor without checksum and private key")
self.test_importmulti({"desc": "sh(wpkh(" + key.pubkey + "))",
"timestamp": "now",
"label": "Unsuccessful P2SH-P2WPKH descriptor import",
"keys": [key.privkey]},
success=False,
error_code=-5,
error_message="Missing checksum")
# Test importing of a P2SH-P2WPKH address via descriptor + private key
key = get_key(self.nodes[0])
p2sh_p2wpkh_label = "Successful P2SH-P2WPKH descriptor import"
self.log.info("Should import a p2sh-p2wpkh address from descriptor and private key")
self.test_importmulti({"desc": descsum_create("sh(wpkh(" + key.pubkey + "))"),
"timestamp": "now",
"label": p2sh_p2wpkh_label,
"keys": [key.privkey]},
success=True)
test_address(self.nodes[1],
key.p2sh_p2wpkh_addr,
solvable=True,
ismine=True,
labels=[p2sh_p2wpkh_label])
# Test ranged descriptor fails if range is not specified
xpriv = "tprv8ZgxMBicQKsPeuVhWwi6wuMQGfPKi9Li5GtX35jVNknACgqe3CY4g5xgkfDDJcmtF7o1QnxWDRYw4H5P26PXq7sbcUkEqeR4fg3Kxp2tigg"
addresses = ["2N7yv4p8G8yEaPddJxY41kPihnWvs39qCMf", "2MsHxyb2JS3pAySeNUsJ7mNnurtpeenDzLA"] # hdkeypath=m/0'/0'/0' and 1'
addresses += ["bcrt1qrd3n235cj2czsfmsuvqqpr3lu6lg0ju7scl8gn", "bcrt1qfqeppuvj0ww98r6qghmdkj70tv8qpchehegrg8"] # wpkh subscripts corresponding to the above addresses
desc = "sh(wpkh(" + xpriv + "/0'/0'/*'" + "))"
self.log.info("Ranged descriptor import should fail without a specified range")
self.test_importmulti({"desc": descsum_create(desc),
"timestamp": "now"},
success=False,
error_code=-8,
error_message='Descriptor is ranged, please specify the range')
# Test importing of a ranged descriptor with xpriv
self.log.info("Should import the ranged descriptor with specified range as solvable")
self.test_importmulti({"desc": descsum_create(desc),
"timestamp": "now",
"range": 1},
success=True)
for address in addresses:
test_address(self.nodes[1],
address,
solvable=True,
ismine=True)
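# Note: the bare integer range above imported indices 0 through 1 (both
# addresses), i.e. an integer n acts as shorthand for the range [0, n].
# The calls below exercise ranges that importmulti must reject.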
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": -1},
success=False, error_code=-8, error_message='End of range is too high')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [-1, 10]},
success=False, error_code=-8, error_message='Range should be greater or equal than 0')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [(2 << 31 + 1) - 1000000, (2 << 31 + 1)]},
success=False, error_code=-8, error_message='End of range is too high')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [2, 1]},
success=False, error_code=-8, error_message='Range specified as [begin,end] must not have begin after end')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [0, 1000001]},
success=False, error_code=-8, error_message='Range is too large')
# Test importing a descriptor containing a WIF private key
wif_priv = "cTe1f5rdT8A8DFgVWTjyPwACsDPJM9ff4QngFxUixCSvvbg1x6sh"
address = "2MuhcG52uHPknxDgmGPsV18jSHFBnnRgjPg"
desc = "sh(wpkh(" + wif_priv + "))"
self.log.info("Should import a descriptor with a WIF private key as spendable")
self.test_importmulti({"desc": descsum_create(desc),
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
address,
solvable=True,
ismine=True)
# dump the private key to ensure it matches what was imported
privkey = self.nodes[1].dumpprivkey(address)
assert_equal(privkey, wif_priv)
# Test importing of a P2PKH address via descriptor
key = get_key(self.nodes[0])
p2pkh_label = "P2PKH descriptor import"
self.log.info("Should import a p2pkh address from descriptor")
self.test_importmulti({"desc": descsum_create("pkh(" + key.pubkey + ")"),
"timestamp": "now",
"label": p2pkh_label},
True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
solvable=True,
ismine=False,
labels=[p2pkh_label])
# Test import fails if both desc and scriptPubKey are provided
key = get_key(self.nodes[0])
self.log.info("Import should fail if both scriptPubKey and desc are provided")
self.test_importmulti({"desc": descsum_create("pkh(" + key.pubkey + ")"),
"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now"},
success=False,
error_code=-8,
error_message='Both a descriptor and a scriptPubKey should not be provided.')
# Test import fails if neither desc nor scriptPubKey are present
key = get_key(self.nodes[0])
self.log.info("Import should fail if neither a descriptor nor a scriptPubKey are provided")
self.test_importmulti({"timestamp": "now"},
success=False,
error_code=-8,
error_message='Either a descriptor or scriptPubKey must be provided.')
# Test importing of a multisig via descriptor
key1 = get_key(self.nodes[0])
key2 = get_key(self.nodes[0])
self.log.info("Should import a 1-of-2 bare multisig from descriptor")
self.test_importmulti({"desc": descsum_create("multi(1," + key1.pubkey + "," + key2.pubkey + ")"),
"timestamp": "now"},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
self.log.info("Should not treat individual keys from the imported bare multisig as watchonly")
test_address(self.nodes[1],
key1.p2pkh_addr,
ismine=False,
iswatchonly=False)
# Import pubkeys with key origin info
self.log.info("Addresses should have hd keypath and master key id after import with key origin")
pub_addr = self.nodes[1].getnewaddress()
pub_addr = self.nodes[1].getnewaddress(address_type="bech32")
info = self.nodes[1].getaddressinfo(pub_addr)
pub = info['pubkey']
pub_keypath = info['hdkeypath']
pub_fpr = info['hdmasterfingerprint']
result = self.nodes[0].importmulti(
[{
'desc' : descsum_create("wpkh([" + pub_fpr + pub_keypath[1:] +"]" + pub + ")"),
"timestamp": "now",
}]
)
assert result[0]['success']
pub_import_info = self.nodes[0].getaddressinfo(pub_addr)
assert_equal(pub_import_info['hdmasterfingerprint'], pub_fpr)
assert_equal(pub_import_info['pubkey'], pub)
assert_equal(pub_import_info['hdkeypath'], pub_keypath)
# Import privkeys with key origin info
priv_addr = self.nodes[1].getnewaddress(address_type="bech32")
info = self.nodes[1].getaddressinfo(priv_addr)
priv = self.nodes[1].dumpprivkey(priv_addr)
priv_keypath = info['hdkeypath']
priv_fpr = info['hdmasterfingerprint']
result = self.nodes[0].importmulti(
[{
'desc' : descsum_create("wpkh([" + priv_fpr + priv_keypath[1:] + "]" + priv + ")"),
"timestamp": "now",
}]
)
assert result[0]['success']
priv_import_info = self.nodes[0].getaddressinfo(priv_addr)
assert_equal(priv_import_info['hdmasterfingerprint'], priv_fpr)
assert_equal(priv_import_info['hdkeypath'], priv_keypath)
# Make sure the key origin info are still there after a restart
self.stop_nodes()
self.start_nodes()
import_info = self.nodes[0].getaddressinfo(pub_addr)
assert_equal(import_info['hdmasterfingerprint'], pub_fpr)
assert_equal(import_info['hdkeypath'], pub_keypath)
import_info = self.nodes[0].getaddressinfo(priv_addr)
assert_equal(import_info['hdmasterfingerprint'], priv_fpr)
assert_equal(import_info['hdkeypath'], priv_keypath)
# Check legacy import does not import key origin info
self.log.info("Legacy imports don't have key origin info")
pub_addr = self.nodes[1].getnewaddress()
info = self.nodes[1].getaddressinfo(pub_addr)
pub = info['pubkey']
result = self.nodes[0].importmulti(
[{
'scriptPubKey': {'address': pub_addr},
'pubkeys': [pub],
"timestamp": "now",
}]
)
assert result[0]['success']
pub_import_info = self.nodes[0].getaddressinfo(pub_addr)
assert_equal(pub_import_info['pubkey'], pub)
assert 'hdmasterfingerprint' not in pub_import_info
assert 'hdkeypath' not in pub_import_info
# Import some public keys to the keypool of a no privkey wallet
self.log.info("Adding pubkey to keypool of disableprivkey wallet")
self.nodes[1].createwallet(wallet_name="noprivkeys", disable_private_keys=True)
wrpc = self.nodes[1].get_wallet_rpc("noprivkeys")
addr1 = self.nodes[0].getnewaddress(address_type="bech32")
addr2 = self.nodes[0].getnewaddress(address_type="bech32")
pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
result = wrpc.importmulti(
[{
'desc': descsum_create('wpkh(' + pub1 + ')'),
'keypool': True,
"timestamp": "now",
},
{
'desc': descsum_create('wpkh(' + pub2 + ')'),
'keypool': True,
"timestamp": "now",
}]
)
assert result[0]['success']
assert result[1]['success']
assert_equal(wrpc.getwalletinfo()["keypoolsize"], 2)
newaddr1 = wrpc.getnewaddress(address_type="bech32")
assert_equal(addr1, newaddr1)
newaddr2 = wrpc.getnewaddress(address_type="bech32")
assert_equal(addr2, newaddr2)
# Import some public keys to the internal keypool of a no privkey wallet
self.log.info("Adding pubkey to internal keypool of disableprivkey wallet")
addr1 = self.nodes[0].getnewaddress(address_type="bech32")
addr2 = self.nodes[0].getnewaddress(address_type="bech32")
pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
result = wrpc.importmulti(
[{
'desc': descsum_create('wpkh(' + pub1 + ')'),
'keypool': True,
'internal': True,
"timestamp": "now",
},
{
'desc': descsum_create('wpkh(' + pub2 + ')'),
'keypool': True,
'internal': True,
"timestamp": "now",
}]
)
assert result[0]['success']
assert result[1]['success']
assert_equal(wrpc.getwalletinfo()["keypoolsize_hd_internal"], 2)
newaddr1 = wrpc.getrawchangeaddress(address_type="bech32")
assert_equal(addr1, newaddr1)
newaddr2 = wrpc.getrawchangeaddress(address_type="bech32")
assert_equal(addr2, newaddr2)
# Import a multisig and make sure the keys don't go into the keypool
self.log.info('Imported scripts with pubkeys should not have their pubkeys go into the keypool')
addr1 = self.nodes[0].getnewaddress(address_type="bech32")
addr2 = self.nodes[0].getnewaddress(address_type="bech32")
pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
result = wrpc.importmulti(
[{
'desc': descsum_create('wsh(multi(2,' + pub1 + ',' + pub2 + '))'),
'keypool': True,
"timestamp": "now",
}]
)
assert result[0]['success']
assert_equal(wrpc.getwalletinfo()["keypoolsize"], 0)
# Cannot import those pubkeys to keypool of wallet with privkeys
self.log.info("Pubkeys cannot be added to the keypool of a wallet with private keys")
wrpc = self.nodes[1].get_wallet_rpc(self.default_wallet_name)
assert wrpc.getwalletinfo()['private_keys_enabled']
result = wrpc.importmulti(
[{
'desc': descsum_create('wpkh(' + pub1 + ')'),
'keypool': True,
"timestamp": "now",
}]
)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], "Keys can only be imported to the keypool when private keys are disabled")
# Make sure ranged imports import keys in order
self.log.info('Key ranges should be imported in order')
wrpc = self.nodes[1].get_wallet_rpc("noprivkeys")
assert_equal(wrpc.getwalletinfo()["keypoolsize"], 0)
assert_equal(wrpc.getwalletinfo()["private_keys_enabled"], False)
xpub = "tpubDAXcJ7s7ZwicqjprRaEWdPoHKrCS215qxGYxpusRLLmJuT69ZSicuGdSfyvyKpvUNYBW1s2U3NSrT6vrCYB9e6nZUEvrqnwXPF8ArTCRXMY"
addresses = [
'bcrt1qtmp74ayg7p24uslctssvjm06q5phz4yrxucgnv', # m/0'/0'/0
'bcrt1q8vprchan07gzagd5e6v9wd7azyucksq2xc76k8', # m/0'/0'/1
'bcrt1qtuqdtha7zmqgcrr26n2rqxztv5y8rafjp9lulu', # m/0'/0'/2
'bcrt1qau64272ymawq26t90md6an0ps99qkrse58m640', # m/0'/0'/3
'bcrt1qsg97266hrh6cpmutqen8s4s962aryy77jp0fg0', # m/0'/0'/4
]
result = wrpc.importmulti(
[{
'desc': descsum_create('wpkh([80002067/0h/0h]' + xpub + '/*)'),
'keypool': True,
'timestamp': 'now',
'range' : [0, 4],
}]
)
for i in range(0, 5):
addr = wrpc.getnewaddress('', 'bech32')
assert_equal(addr, addresses[i])
if __name__ == '__main__':
ImportMultiTest().main()
|
{
"content_hash": "b58465204e77af6383fb3b28ca1cde77",
"timestamp": "",
"source": "github",
"line_count": 858,
"max_line_length": 316,
"avg_line_length": 50.49650349650349,
"alnum_prop": 0.5449845358445276,
"repo_name": "jnewbery/bitcoin",
"id": "13186b9e1d4e5350afed37580c3541ea518a73fd",
"size": "43540",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "test/functional/wallet_importmulti.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28173"
},
{
"name": "C",
"bytes": "959143"
},
{
"name": "C++",
"bytes": "8134257"
},
{
"name": "CMake",
"bytes": "29132"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "M4",
"bytes": "218255"
},
{
"name": "Makefile",
"bytes": "124030"
},
{
"name": "Objective-C",
"bytes": "113876"
},
{
"name": "Objective-C++",
"bytes": "5497"
},
{
"name": "Python",
"bytes": "2246986"
},
{
"name": "QMake",
"bytes": "798"
},
{
"name": "Sage",
"bytes": "35184"
},
{
"name": "Scheme",
"bytes": "9339"
},
{
"name": "Shell",
"bytes": "166312"
}
],
"symlink_target": ""
}
|
from django import forms
import voxel_globe.meta.models as models
class OrderVoxelWorldBaseForm(forms.Form):
image_set = forms.ModelChoiceField(label="Image Set",
queryset=models.ImageSet.objects.all().order_by('name'))
camera_set = forms.ModelChoiceField(label="Camera Set",
queryset=models.CameraSet.objects.all().order_by('name'))
scene = forms.ModelChoiceField(label="Scene",
queryset=models.Scene.objects.all().order_by('name'))
regularization = forms.BooleanField(label="Regularize?", required=False)
def __init__(self, *args, **kwargs):
super(OrderVoxelWorldBaseForm, self).__init__(*args, **kwargs)
from django.forms.widgets import HiddenInput
self.fields['regularization'].widget = HiddenInput()
#refines = forms.IntegerField(label="Number of refines?", min_value=0)
class OrderVoxelWorldDegreeForm(forms.Form):
south_d = forms.FloatField(label="South Latitude", help_text="degrees")
west_d = forms.FloatField(label="West Longitude", help_text="degrees")
bottom_d = forms.FloatField(label="Bottom Altitude", help_text="meters")
north_d = forms.FloatField(label="North Latitude", help_text="degrees")
east_d = forms.FloatField(label="East Longitude", help_text="degrees")
top_d = forms.FloatField(label="Top Altitude", help_text="meters")
voxel_size_d = forms.FloatField(label="Voxel Size", help_text="meters", min_value=0)
south_d.widget.attrs['class'] = 'bbox degree'
west_d.widget.attrs['class'] = 'bbox degree'
bottom_d.widget.attrs['class'] = 'bbox degree'
north_d.widget.attrs['class'] = 'bbox degree'
east_d.widget.attrs['class'] = 'bbox degree'
top_d.widget.attrs['class'] = 'bbox degree'
voxel_size_d.widget.attrs['class'] = 'degree'
class OrderVoxelWorldMeterForm(forms.Form):
south_m = forms.FloatField(label="South", help_text="meters")
west_m = forms.FloatField(label="West", help_text="meters")
bottom_m = forms.FloatField(label="Bottom Altitude", help_text="meters")
north_m = forms.FloatField(label="North", help_text="meters")
east_m = forms.FloatField(label="East", help_text="meters")
top_m = forms.FloatField(label="Top Altitude", help_text="meters")
voxel_size_m = forms.FloatField(label="Voxel Size", help_text="meters", min_value=0)
south_m.widget.attrs['class'] = 'bbox meter'
west_m.widget.attrs['class'] = 'bbox meter'
bottom_m.widget.attrs['class'] = 'bbox meter'
north_m.widget.attrs['class'] = 'bbox meter'
east_m.widget.attrs['class'] = 'bbox meter'
top_m.widget.attrs['class'] = 'bbox meter'
voxel_size_m.widget.attrs['class'] = 'meter'
class OrderVoxelWorldUnitForm(forms.Form):
south_u = forms.FloatField(label="South", help_text="units")
west_u = forms.FloatField(label="West", help_text="units")
bottom_u = forms.FloatField(label="Bottom Altitude", help_text="units")
north_u = forms.FloatField(label="North", help_text="units")
east_u = forms.FloatField(label="East", help_text="units")
top_u = forms.FloatField(label="Top Altitude", help_text="units")
voxel_size_u = forms.FloatField(label="Voxel Size", help_text="units", min_value=0)
south_u.widget.attrs['class'] = 'bbox unit'
west_u.widget.attrs['class'] = 'bbox unit'
bottom_u.widget.attrs['class'] = 'bbox unit'
north_u.widget.attrs['class'] = 'bbox unit'
east_u.widget.attrs['class'] = 'bbox unit'
top_u.widget.attrs['class'] = 'bbox unit'
voxel_size_u.widget.attrs['class'] = 'unit'
|
{
"content_hash": "308a5e25d9e532d171a301aa9da962d0",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 86,
"avg_line_length": 50.3235294117647,
"alnum_prop": 0.7051431911163063,
"repo_name": "ngageoint/voxel-globe",
"id": "ae06bdcd6a7859c737f0691c9fcd5a2b0d9eed22",
"size": "3422",
"binary": false,
"copies": "1",
"ref": "refs/heads/nga_p2_release",
"path": "voxel_globe/build_voxel_world/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10038"
},
{
"name": "HTML",
"bytes": "125002"
},
{
"name": "JavaScript",
"bytes": "296605"
},
{
"name": "Nginx",
"bytes": "2623"
},
{
"name": "Python",
"bytes": "377761"
},
{
"name": "Shell",
"bytes": "100665"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from kazoo.client import KazooClient
import kazoo
import sys, os, time
import re
from optparse import OptionParser
def do_zookeeper_read(zk, path):
print path
data, stat = zk.get(path)
print 'node info:', data
print 'node stat:', stat
children = zk.get_children(path)
print 'node children:', children
return (data, stat, children)
def do_zookeeper_read_tree(zk, path):
print path
data, stat = zk.get(path)
print 'node info:', data
print 'node stat:', stat
children = zk.get_children(path)
print 'node children:', children
for child in children:
do_zookeeper_read_tree(zk, path + '/' + child)
def do_zookeeper_create(zk, path, value):
print path
zk.create(path, value)
do_zookeeper_read(zk, path)
def do_zookeeper_delete(zk, path):
print path
zk.delete(path)
try:
do_zookeeper_read(zk, path)
except kazoo.exceptions.NoNodeError:
print 'deleted'
def do_zookeeper_update(zk, path, value):
print path
zk.set(path, value)
do_zookeeper_read(zk, path)
def do_zookeeper_copy(zk_src, src, zk_dst, dst):
data, stat, children = do_zookeeper_read(zk_src, src)
print '## copy %s -> %s (%s)' % (src, dst + src, data)
zk_dst.create(dst + src, data)
for child in children:
do_zookeeper_copy(zk_src, src + '/' + child, zk_dst, dst)
if __name__ == '__main__':
usage = "usage: %prog [options]"
parser = OptionParser(usage=usage, version="%prog 1.0")
parser.add_option('-a', '--address', dest='address', default='', help='zookeeper address')
parser.add_option('-n', '--node', dest='node', default='', help='zookeeper node path')
parser.add_option('-r', '--read', dest='read', default=False, help='zookeeper node read', action='store_true')
parser.add_option('-c', '--create', dest='create', default='', help='zookeeper node create')
parser.add_option('-d', '--delete', dest='delete', default=False, help='zookeeper node delete', action='store_true')
parser.add_option('-u', '--update', dest='update', default='', help='zookeeper node update')
parser.add_option('', '--copy', dest='copy', default='', help='zookeeper copy addr:port/new_path')
parser.add_option('', '--read_tree', dest='read_tree', default=False, help='zookeeper node read tree', action='store_true')
(options, args) = parser.parse_args()
zk = KazooClient(options.address)
zk.start()
if options.read:
do_zookeeper_read(zk, options.node)
elif options.create != '':
do_zookeeper_create(zk, options.node, options.create)
elif options.delete:
do_zookeeper_delete(zk, options.node)
elif options.update != '':
do_zookeeper_update(zk, options.node, options.update)
elif options.copy != '':
dest_addr, dest_path = options.copy.split('/', 1)
zk_dest = KazooClient(dest_addr)
zk_dest.start()
do_zookeeper_copy(zk, options.node, zk_dest, '/' + dest_path)
elif options.read_tree:
do_zookeeper_read_tree(zk, options.node)
else:
parser.print_usage()
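# Example invocations (addresses and paths are hypothetical):
# python zk_util.py -a zk1:2181 -n /app/config -r
# python zk_util.py -a zk1:2181 -n /app/config -c 'some value'
# python zk_util.py -a zk1:2181 -n /app/config --copy zk2:2181/backup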
|
{
"content_hash": "011bebf6eb3d591f319e3ccab0f4c4f9",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 124,
"avg_line_length": 28.153846153846153,
"alnum_prop": 0.6816939890710383,
"repo_name": "naver/arcus-python2-client",
"id": "bb8da6c20570a0499b8cb9906c7723d6d0f40712",
"size": "3554",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zk_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "96109"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Page.updated_at'
db.alter_column(u'radpress_page', 'updated_at', self.gf('django.db.models.fields.DateTimeField')())
# Changing field 'Article.updated_at'
db.alter_column(u'radpress_article', 'updated_at', self.gf('django.db.models.fields.DateTimeField')())
def backwards(self, orm):
# Changing field 'Page.updated_at'
db.alter_column(u'radpress_page', 'updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True))
# Changing field 'Article.updated_at'
db.alter_column(u'radpress_article', 'updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'radpress.article': {
'Meta': {'ordering': "('-created_at', '-updated_at')", 'object_name': 'Article'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'content_body': ('django.db.models.fields.TextField', [], {}),
'cover_image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['radpress.EntryImage']", 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['radpress.Tag']", 'null': 'True', 'through': u"orm['radpress.ArticleTag']", 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
},
u'radpress.articletag': {
'Meta': {'object_name': 'ArticleTag'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['radpress.Article']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['radpress.Tag']"})
},
u'radpress.entryimage': {
'Meta': {'object_name': 'EntryImage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'radpress.menu': {
'Meta': {'unique_together': "(('order', 'page'),)", 'object_name': 'Menu'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '3'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['radpress.Page']", 'unique': 'True'})
},
u'radpress.page': {
'Meta': {'ordering': "('-created_at', '-updated_at')", 'object_name': 'Page'},
'content': ('django.db.models.fields.TextField', [], {}),
'content_body': ('django.db.models.fields.TextField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
},
u'radpress.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
}
}
complete_apps = ['radpress']
|
{
"content_hash": "9bbc208098149fa2ad11e36e4efaf3d4",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 207,
"avg_line_length": 69.08849557522124,
"alnum_prop": 0.5561675419495324,
"repo_name": "ifearcompilererrors/fle_redesign",
"id": "bc819f0c91008a5c3121007191c8001b454cc49d",
"size": "7831",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "fle_redesign/apps/radpress/migrations/0005_auto__chg_field_page_updated_at__chg_field_article_updated_at.py",
"mode": "33261",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import numpy as np
from emukit.core.initial_designs.latin_design import LatinDesign
from emukit.core.loop import FixedIterationsStoppingCondition, LoopState, UserFunctionWrapper
from emukit.core.parameter_space import ParameterSpace
from emukit.examples.fabolas import FabolasLoop
def fmin_fabolas(
func,
space: ParameterSpace,
s_min: float,
s_max: float,
n_iters: int,
n_init: int = 20,
marginalize_hypers: bool = True,
) -> LoopState:
"""
Simple interface for Fabolas which optimizes the hyperparameters of machine learning algorithms
by reasoning across training data set subsets. For further details see:
Fast Bayesian hyperparameter optimization on large datasets
A. Klein and S. Falkner and S. Bartels and P. Hennig and F. Hutter
Electronic Journal of Statistics (2017)
:param func: objective function which gets a hyperparameter configuration x and training dataset size s as input,
and returns the validation error and the runtime after training x on s datapoints.
:param space: input space
:param s_min: minimum training dataset size (linear scale)
:param s_max: maximum training dataset size (linear scale)
:param n_iters: number of iterations
:param n_init: number of initial design points (needs to be smaller than n_iters)
:param marginalize_hypers: determines whether to use a MAP estimate or to marginalize over the GP hyperparameters
:return: LoopState with all evaluated data points
"""
initial_design = LatinDesign(space)
grid = initial_design.get_samples(n_init)
X_init = np.zeros([n_init, grid.shape[1] + 1])
Y_init = np.zeros([n_init, 1])
cost_init = np.zeros([n_init])
subsets = np.array([s_max // 2**i for i in range(2, 10)])[::-1]
idx = np.where(subsets < s_min)[0]
subsets[idx] = s_min
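# e.g. with s_max = 51200 the schedule is [100, 200, 400, 800, 1600, 3200,
# 6400, 12800] (s_max // 2**9 up through s_max // 2**2), and any entry that
# falls below s_min is clipped up to s_min.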
for it in range(n_init):
func_val, cost = func(x=grid[it], s=subsets[it % len(subsets)])
X_init[it] = np.concatenate((grid[it], np.array([subsets[it % len(subsets)]])))
Y_init[it] = func_val
cost_init[it] = cost
def wrapper(x):
y, c = func(x[0, :-1], x[0, -1])
return np.array([[y]]), np.array([[c]])
loop = FabolasLoop(
X_init=X_init,
Y_init=Y_init,
cost_init=cost_init,
space=space,
s_min=s_min,
s_max=s_max,
marginalize_hypers=marginalize_hypers,
)
loop.run_loop(
user_function=UserFunctionWrapper(wrapper, extra_output_names=["cost"]),
stopping_condition=FixedIterationsStoppingCondition(n_iters - n_init),
)
return loop.loop_state
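# A minimal usage sketch (the objective below is hypothetical and not part of
# the original module):
#
# from emukit.core import ContinuousParameter, ParameterSpace
#
# def train_model(x, s):
# # train with hyperparameters x on s datapoints, then validate
# return validation_error, runtime_seconds
#
# space = ParameterSpace([ContinuousParameter("log_C", -10, 10)])
# state = fmin_fabolas(train_model, space, s_min=100, s_max=50000, n_iters=50)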
|
{
"content_hash": "f511c7454388b5143164e9ee93ed1293",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 117,
"avg_line_length": 35.13333333333333,
"alnum_prop": 0.6732447817836812,
"repo_name": "EmuKit/emukit",
"id": "7f489bb4c2fb5c02361de0f0a8f646bb7cd3c1c7",
"size": "2635",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "emukit/examples/fabolas/fmin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "972291"
},
{
"name": "Stan",
"bytes": "1413"
}
],
"symlink_target": ""
}
|
"""Utility methods"""
from __future__ import print_function, unicode_literals
from builtins import input
try:
import configparser
except ImportError:
import ConfigParser as configparser
import os
import sys
import warnings
CONFIG_PATH = os.path.expanduser('~/.attbillsplitter.conf')
PAGE_LOADING_WAIT_S = 10
DATABASE_PATH = 'att_bill.db'
LOG_PATH = 'notif_history.log'
warnings.simplefilter('ignore')
def initialize_twilio():
"""Initialize twilio credentials from command line input and save in
config file.
:returns: None
"""
number = input('Twilio Number (e.g. +11234567890): ')
account_sid = input('Twilio Account SID: ')
auth_token = input('Twilio Authentication Token: ')
config = configparser.ConfigParser()
config.read(CONFIG_PATH)
if config.remove_section('twilio'):
print('\U00002B55 Old twilio credentials removed.'.encode("utf-8"))
config.add_section('twilio')
config.set('twilio', 'number', number)
config.set('twilio', 'account_sid', account_sid)
config.set('twilio', 'auth_token', auth_token)
with open(CONFIG_PATH, 'w') as configfile:
config.write(configfile)
print('\U00002705 New twilio account added.'.encode("utf-8"))
def load_twilio_config():
"""Load twilio credentials. Prompt to initialize if not yet initialized.
:returns: a tuple of twilio number, sid and auth token
:rtype: tuple
"""
config = configparser.ConfigParser()
config.read(CONFIG_PATH)
# initialize twilio if not yet initialized
if 'twilio' not in config.sections():
initialize_twilio()
config.read(CONFIG_PATH)
number = config.get('twilio', 'number')
account_sid = config.get('twilio', 'account_sid')
auth_token = config.get('twilio', 'auth_token')
return (number, account_sid, auth_token)
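# A minimal usage sketch (the twilio client import is an assumption about the
# caller's environment, not part of this module):
#
# number, account_sid, auth_token = load_twilio_config()
# from twilio.rest import Client # assumed twilio helper library
# client = Client(account_sid, auth_token)
# client.messages.create(to='+10987654321', from_=number, body='hi')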
def initialize_payment_msg():
"""Initialize payment message to be appended to the charging details
before sending to users (generally a message telling users how to pay you).
:returns: None
"""
prompt_msg = ('You can enter a short message to put after the charge '
'details to send to your users. (For example, letting your '
'users know how to pay you)\n-> ')
message = input(prompt_msg)
config = configparser.ConfigParser()
config.read(CONFIG_PATH)
if config.remove_section('message'):
print('\U00002B55 Old payment message removed.'.encode("utf-8"))
config.add_section('message')
config.set('message', 'payment', message)
with open(CONFIG_PATH, 'w') as configfile:
config.write(configfile)
print('\U00002705 New payment message saved.'.encode("utf-8"))
def load_payment_msg():
"""Load payment message. Prompt to initialize if not yet initialized.
:returns: payment message cached in config file
:rtype: str
"""
config = configparser.ConfigParser()
config.read(CONFIG_PATH)
    # initialize payment message if not yet initialized
if ('message' not in config.sections() or
'payment' not in [t for (t, _) in config.items('message')]):
initialize_payment_msg()
config.read(CONFIG_PATH)
else:
message = config.get('message', 'payment')
prompt = ('\U00002753 Do you want to keep using the following '
'message: \n{}\n(y/n)? '.format(message))
try:
# python3
reset = input(prompt)
except UnicodeEncodeError:
# python2
reset = input(prompt.encode(sys.stdout.encoding))
        if reset in ('n', 'N', 'no', 'No', 'NO'):
initialize_payment_msg()
config.read(CONFIG_PATH)
return config.get('message', 'payment')
|
{
"content_hash": "76a854e260d6be968f5297a7971f0061",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 79,
"avg_line_length": 33.86363636363637,
"alnum_prop": 0.6483221476510067,
"repo_name": "brianzq/att-bill-splitter",
"id": "0fedae1f3cd7f80d6c24b96d0afa4273a9a34cf7",
"size": "3748",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "attbillsplitter/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43408"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
import time
import pickle
import psycopg2
import psycopg2.extensions
import psycopg2.extras
from psycopg2.extensions import b
from testutils import unittest, ConnectingTestCase, skip_before_postgres
from testutils import skip_if_no_namedtuple, skip_if_no_getrefcount
from testutils import skip_if_no_superuser, skip_if_windows
class CursorTests(ConnectingTestCase):
def test_close_idempotent(self):
cur = self.conn.cursor()
cur.close()
cur.close()
self.assert_(cur.closed)
def test_empty_query(self):
cur = self.conn.cursor()
self.assertRaises(psycopg2.ProgrammingError, cur.execute, "")
self.assertRaises(psycopg2.ProgrammingError, cur.execute, " ")
self.assertRaises(psycopg2.ProgrammingError, cur.execute, ";")
def test_executemany_propagate_exceptions(self):
conn = self.conn
cur = conn.cursor()
cur.execute("create temp table test_exc (data int);")
def buggygen():
yield 1 // 0
self.assertRaises(ZeroDivisionError,
cur.executemany, "insert into test_exc values (%s)", buggygen())
cur.close()
def test_mogrify_unicode(self):
conn = self.conn
cur = conn.cursor()
# test consistency between execute and mogrify.
# unicode query containing only ascii data
cur.execute(u"SELECT 'foo';")
self.assertEqual('foo', cur.fetchone()[0])
self.assertEqual(b("SELECT 'foo';"), cur.mogrify(u"SELECT 'foo';"))
conn.set_client_encoding('UTF8')
snowman = u"\u2603"
# unicode query with non-ascii data
cur.execute(u"SELECT '%s';" % snowman)
self.assertEqual(snowman.encode('utf8'), b(cur.fetchone()[0]))
self.assertEqual(("SELECT '%s';" % snowman).encode('utf8'),
cur.mogrify(u"SELECT '%s';" % snowman).replace(b("E'"), b("'")))
# unicode args
cur.execute("SELECT %s;", (snowman,))
self.assertEqual(snowman.encode("utf-8"), b(cur.fetchone()[0]))
self.assertEqual(("SELECT '%s';" % snowman).encode('utf8'),
cur.mogrify("SELECT %s;", (snowman,)).replace(b("E'"), b("'")))
# unicode query and args
cur.execute(u"SELECT %s;", (snowman,))
self.assertEqual(snowman.encode("utf-8"), b(cur.fetchone()[0]))
self.assertEqual(("SELECT '%s';" % snowman).encode('utf8'),
cur.mogrify(u"SELECT %s;", (snowman,)).replace(b("E'"), b("'")))
def test_mogrify_decimal_explodes(self):
# issue #7: explodes on windows with python 2.5 and psycopg 2.2.2
        try:
            from decimal import Decimal
        except ImportError:
            return
conn = self.conn
cur = conn.cursor()
self.assertEqual(b('SELECT 10.3;'),
cur.mogrify("SELECT %s;", (Decimal("10.3"),)))
@skip_if_no_getrefcount
def test_mogrify_leak_on_multiple_reference(self):
# issue #81: reference leak when a parameter value is referenced
# more than once from a dict.
cur = self.conn.cursor()
i = lambda x: x
foo = i('foo') * 10
import sys
nref1 = sys.getrefcount(foo)
cur.mogrify("select %(foo)s, %(foo)s, %(foo)s", {'foo': foo})
nref2 = sys.getrefcount(foo)
self.assertEqual(nref1, nref2)
def test_bad_placeholder(self):
cur = self.conn.cursor()
self.assertRaises(psycopg2.ProgrammingError,
cur.mogrify, "select %(foo", {})
self.assertRaises(psycopg2.ProgrammingError,
cur.mogrify, "select %(foo", {'foo': 1})
self.assertRaises(psycopg2.ProgrammingError,
cur.mogrify, "select %(foo, %(bar)", {'foo': 1})
self.assertRaises(psycopg2.ProgrammingError,
cur.mogrify, "select %(foo, %(bar)", {'foo': 1, 'bar': 2})
def test_cast(self):
curs = self.conn.cursor()
self.assertEqual(42, curs.cast(20, '42'))
self.assertAlmostEqual(3.14, curs.cast(700, '3.14'))
try:
from decimal import Decimal
except ImportError:
self.assertAlmostEqual(123.45, curs.cast(1700, '123.45'))
else:
self.assertEqual(Decimal('123.45'), curs.cast(1700, '123.45'))
from datetime import date
self.assertEqual(date(2011,1,2), curs.cast(1082, '2011-01-02'))
self.assertEqual("who am i?", curs.cast(705, 'who am i?')) # unknown
def test_cast_specificity(self):
curs = self.conn.cursor()
self.assertEqual("foo", curs.cast(705, 'foo'))
D = psycopg2.extensions.new_type((705,), "DOUBLING", lambda v, c: v * 2)
psycopg2.extensions.register_type(D, self.conn)
self.assertEqual("foofoo", curs.cast(705, 'foo'))
T = psycopg2.extensions.new_type((705,), "TREBLING", lambda v, c: v * 3)
psycopg2.extensions.register_type(T, curs)
self.assertEqual("foofoofoo", curs.cast(705, 'foo'))
curs2 = self.conn.cursor()
self.assertEqual("foofoo", curs2.cast(705, 'foo'))
def test_weakref(self):
from weakref import ref
curs = self.conn.cursor()
w = ref(curs)
del curs
import gc; gc.collect()
self.assert_(w() is None)
def test_null_name(self):
curs = self.conn.cursor(None)
self.assertEqual(curs.name, None)
def test_invalid_name(self):
curs = self.conn.cursor()
curs.execute("create temp table invname (data int);")
for i in (10,20,30):
curs.execute("insert into invname values (%s)", (i,))
curs.close()
curs = self.conn.cursor(r'1-2-3 \ "test"')
curs.execute("select data from invname order by data")
self.assertEqual(curs.fetchall(), [(10,), (20,), (30,)])
def _create_withhold_table(self):
curs = self.conn.cursor()
try:
curs.execute("drop table withhold")
except psycopg2.ProgrammingError:
self.conn.rollback()
curs.execute("create table withhold (data int)")
for i in (10, 20, 30):
curs.execute("insert into withhold values (%s)", (i,))
curs.close()
def test_withhold(self):
self.assertRaises(psycopg2.ProgrammingError, self.conn.cursor,
withhold=True)
self._create_withhold_table()
curs = self.conn.cursor("W")
self.assertEqual(curs.withhold, False)
curs.withhold = True
self.assertEqual(curs.withhold, True)
curs.execute("select data from withhold order by data")
self.conn.commit()
self.assertEqual(curs.fetchall(), [(10,), (20,), (30,)])
curs.close()
curs = self.conn.cursor("W", withhold=True)
self.assertEqual(curs.withhold, True)
curs.execute("select data from withhold order by data")
self.conn.commit()
self.assertEqual(curs.fetchall(), [(10,), (20,), (30,)])
curs = self.conn.cursor()
curs.execute("drop table withhold")
self.conn.commit()
def test_withhold_no_begin(self):
self._create_withhold_table()
curs = self.conn.cursor("w", withhold=True)
curs.execute("select data from withhold order by data")
self.assertEqual(curs.fetchone(), (10,))
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_BEGIN)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_INTRANS)
self.conn.commit()
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
self.assertEqual(curs.fetchone(), (20,))
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
curs.close()
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
def test_withhold_autocommit(self):
self._create_withhold_table()
self.conn.commit()
self.conn.autocommit = True
curs = self.conn.cursor("w", withhold=True)
curs.execute("select data from withhold order by data")
self.assertEqual(curs.fetchone(), (10,))
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
self.conn.commit()
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
curs.close()
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
def test_scrollable(self):
self.assertRaises(psycopg2.ProgrammingError, self.conn.cursor,
scrollable=True)
curs = self.conn.cursor()
curs.execute("create table scrollable (data int)")
curs.executemany("insert into scrollable values (%s)",
[(i,) for i in range(100)])
curs.close()
for t in range(2):
if not t:
curs = self.conn.cursor("S")
self.assertEqual(curs.scrollable, None)
curs.scrollable = True
else:
curs = self.conn.cursor("S", scrollable=True)
self.assertEqual(curs.scrollable, True)
curs.itersize = 10
# complex enough to make postgres cursors declare without
# scroll/no scroll to fail
curs.execute("""
select x.data
from scrollable x
join scrollable y on x.data = y.data
order by y.data""")
for i, (n,) in enumerate(curs):
self.assertEqual(i, n)
curs.scroll(-1)
for i in range(99, -1, -1):
curs.scroll(-1)
self.assertEqual(i, curs.fetchone()[0])
curs.scroll(-1)
curs.close()
def test_not_scrollable(self):
self.assertRaises(psycopg2.ProgrammingError, self.conn.cursor,
scrollable=False)
curs = self.conn.cursor()
curs.execute("create table scrollable (data int)")
curs.executemany("insert into scrollable values (%s)",
[(i,) for i in range(100)])
curs.close()
curs = self.conn.cursor("S") # default scrollability
curs.execute("select * from scrollable")
self.assertEqual(curs.scrollable, None)
curs.scroll(2)
try:
curs.scroll(-1)
except psycopg2.OperationalError:
return self.skipTest("can't evaluate non-scrollable cursor")
curs.close()
curs = self.conn.cursor("S", scrollable=False)
self.assertEqual(curs.scrollable, False)
curs.execute("select * from scrollable")
curs.scroll(2)
self.assertRaises(psycopg2.OperationalError, curs.scroll, -1)
@skip_before_postgres(8, 2)
def test_iter_named_cursor_efficient(self):
curs = self.conn.cursor('tmp')
# if these records are fetched in the same roundtrip their
# timestamp will not be influenced by the pause in Python world.
curs.execute("""select clock_timestamp() from generate_series(1,2)""")
i = iter(curs)
t1 = (i.next())[0] # the brackets work around a 2to3 bug
time.sleep(0.2)
t2 = (i.next())[0]
self.assert_((t2 - t1).microseconds * 1e-6 < 0.1,
"named cursor records fetched in 2 roundtrips (delta: %s)"
% (t2 - t1))
@skip_before_postgres(8, 0)
def test_iter_named_cursor_default_itersize(self):
curs = self.conn.cursor('tmp')
curs.execute('select generate_series(1,50)')
rv = [(r[0], curs.rownumber) for r in curs]
# everything swallowed in one gulp
self.assertEqual(rv, [(i,i) for i in range(1,51)])
@skip_before_postgres(8, 0)
def test_iter_named_cursor_itersize(self):
curs = self.conn.cursor('tmp')
curs.itersize = 30
curs.execute('select generate_series(1,50)')
rv = [(r[0], curs.rownumber) for r in curs]
# everything swallowed in two gulps
self.assertEqual(rv, [(i,((i - 1) % 30) + 1) for i in range(1,51)])
@skip_before_postgres(8, 0)
def test_iter_named_cursor_rownumber(self):
curs = self.conn.cursor('tmp')
# note: this fails if itersize < dataset: internally we check
        # rownumber == rowcount to detect when to read another page, so we
# would need an extra attribute to have a monotonic rownumber.
curs.itersize = 20
curs.execute('select generate_series(1,10)')
for i, rec in enumerate(curs):
self.assertEqual(i + 1, curs.rownumber)
@skip_if_no_namedtuple
def test_namedtuple_description(self):
curs = self.conn.cursor()
curs.execute("""select
3.14::decimal(10,2) as pi,
'hello'::text as hi,
'2010-02-18'::date as now;
""")
self.assertEqual(len(curs.description), 3)
for c in curs.description:
self.assertEqual(len(c), 7) # DBAPI happy
for a in ('name', 'type_code', 'display_size', 'internal_size',
'precision', 'scale', 'null_ok'):
self.assert_(hasattr(c, a), a)
c = curs.description[0]
self.assertEqual(c.name, 'pi')
self.assert_(c.type_code in psycopg2.extensions.DECIMAL.values)
self.assert_(c.internal_size > 0)
self.assertEqual(c.precision, 10)
self.assertEqual(c.scale, 2)
c = curs.description[1]
self.assertEqual(c.name, 'hi')
self.assert_(c.type_code in psycopg2.STRING.values)
self.assert_(c.internal_size < 0)
self.assertEqual(c.precision, None)
self.assertEqual(c.scale, None)
c = curs.description[2]
self.assertEqual(c.name, 'now')
self.assert_(c.type_code in psycopg2.extensions.DATE.values)
self.assert_(c.internal_size > 0)
self.assertEqual(c.precision, None)
self.assertEqual(c.scale, None)
def test_pickle_description(self):
curs = self.conn.cursor()
curs.execute('SELECT 1 AS foo')
description = curs.description
pickled = pickle.dumps(description, pickle.HIGHEST_PROTOCOL)
unpickled = pickle.loads(pickled)
self.assertEqual(description, unpickled)
@skip_before_postgres(8, 0)
def test_named_cursor_stealing(self):
# you can use a named cursor to iterate on a refcursor created
# somewhere else
cur1 = self.conn.cursor()
cur1.execute("DECLARE test CURSOR WITHOUT HOLD "
" FOR SELECT generate_series(1,7)")
cur2 = self.conn.cursor('test')
# can call fetch without execute
self.assertEqual((1,), cur2.fetchone())
self.assertEqual([(2,), (3,), (4,)], cur2.fetchmany(3))
self.assertEqual([(5,), (6,), (7,)], cur2.fetchall())
@skip_before_postgres(8, 0)
def test_scroll(self):
cur = self.conn.cursor()
cur.execute("select generate_series(0,9)")
cur.scroll(2)
self.assertEqual(cur.fetchone(), (2,))
cur.scroll(2)
self.assertEqual(cur.fetchone(), (5,))
cur.scroll(2, mode='relative')
self.assertEqual(cur.fetchone(), (8,))
cur.scroll(-1)
self.assertEqual(cur.fetchone(), (8,))
cur.scroll(-2)
self.assertEqual(cur.fetchone(), (7,))
cur.scroll(2, mode='absolute')
self.assertEqual(cur.fetchone(), (2,))
# on the boundary
cur.scroll(0, mode='absolute')
self.assertEqual(cur.fetchone(), (0,))
self.assertRaises((IndexError, psycopg2.ProgrammingError),
cur.scroll, -1, mode='absolute')
cur.scroll(0, mode='absolute')
self.assertRaises((IndexError, psycopg2.ProgrammingError),
cur.scroll, -1)
cur.scroll(9, mode='absolute')
self.assertEqual(cur.fetchone(), (9,))
self.assertRaises((IndexError, psycopg2.ProgrammingError),
cur.scroll, 10, mode='absolute')
cur.scroll(9, mode='absolute')
self.assertRaises((IndexError, psycopg2.ProgrammingError),
cur.scroll, 1)
@skip_before_postgres(8, 0)
def test_scroll_named(self):
cur = self.conn.cursor('tmp', scrollable=True)
cur.execute("select generate_series(0,9)")
cur.scroll(2)
self.assertEqual(cur.fetchone(), (2,))
cur.scroll(2)
self.assertEqual(cur.fetchone(), (5,))
cur.scroll(2, mode='relative')
self.assertEqual(cur.fetchone(), (8,))
cur.scroll(9, mode='absolute')
self.assertEqual(cur.fetchone(), (9,))
def test_bad_subclass(self):
# check that we get an error message instead of a segfault
# for badly written subclasses.
# see http://stackoverflow.com/questions/22019341/
class StupidCursor(psycopg2.extensions.cursor):
def __init__(self, *args, **kwargs):
# I am stupid so not calling superclass init
pass
cur = StupidCursor()
self.assertRaises(psycopg2.InterfaceError, cur.execute, 'select 1')
self.assertRaises(psycopg2.InterfaceError, cur.executemany,
'select 1', [])
def test_callproc_badparam(self):
cur = self.conn.cursor()
self.assertRaises(TypeError, cur.callproc, 'lower', 42)
@skip_if_no_superuser
@skip_if_windows
@skip_before_postgres(8, 4)
def test_external_close_sync(self):
# If a "victim" connection is closed by a "control" connection
# behind psycopg2's back, psycopg2 always handles it correctly:
# raise OperationalError, set conn.closed to 2. This reproduces
# issue #443, a race between control_conn closing victim_conn and
# psycopg2 noticing.
control_conn = self.conn
connect_func = self.connect
wait_func = lambda conn: None
self._test_external_close(control_conn, connect_func, wait_func)
@skip_if_no_superuser
@skip_if_windows
@skip_before_postgres(8, 4)
def test_external_close_async(self):
# Issue #443 is in the async code too. Since the fix is duplicated,
# so is the test.
control_conn = self.conn
connect_func = lambda: self.connect(async=True)
wait_func = psycopg2.extras.wait_select
self._test_external_close(control_conn, connect_func, wait_func)
def _test_external_close(self, control_conn, connect_func, wait_func):
# The short sleep before using victim_conn the second time makes it
# much more likely to lose the race and see the bug. Repeating the
# test several times makes it even more likely.
for i in range(10):
victim_conn = connect_func()
wait_func(victim_conn)
with victim_conn.cursor() as cur:
cur.execute('select pg_backend_pid()')
wait_func(victim_conn)
pid1 = cur.fetchall()[0][0]
with control_conn.cursor() as cur:
cur.execute('select pg_terminate_backend(%s)', (pid1,))
def f():
with victim_conn.cursor() as cur:
cur.execute('select 1')
wait_func(victim_conn)
time.sleep(0.001)
self.assertRaises(psycopg2.OperationalError, f)
self.assertEqual(victim_conn.closed, 2)
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "7b1dc531e7cd78f5e1b59ff384da8bc4",
"timestamp": "",
"source": "github",
"line_count": 531,
"max_line_length": 80,
"avg_line_length": 38.220338983050844,
"alnum_prop": 0.5955161369795516,
"repo_name": "zxjzxj9/FlaskBoard",
"id": "e435bcc1a744c59c3504d5c38448d7b1216c895e",
"size": "21326",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "web/lib/python2.7/site-packages/psycopg2/tests/test_cursor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "6231"
},
{
"name": "HTML",
"bytes": "15432"
},
{
"name": "JavaScript",
"bytes": "6187"
},
{
"name": "Python",
"bytes": "3677363"
},
{
"name": "Shell",
"bytes": "3234"
}
],
"symlink_target": ""
}
|
import os
from PyQt4 import QtGui
from PyQt4.phonon import Phonon
if __name__ == "__main__":
app = QtGui.QApplication([])
app.setApplicationName("Phonon Video Player")
file_path = "i.mpg"
media_src = Phonon.MediaSource(file_path)
media_obj = Phonon.MediaObject()
media_obj.setCurrentSource(media_src)
video_widget = Phonon.VideoWidget()
Phonon.createPath(media_obj, video_widget)
audio_out = Phonon.AudioOutput(Phonon.VideoCategory)
Phonon.createPath(media_obj, audio_out)
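    # Media graph: media_obj is the source node; createPath() wires it both
    # to the video widget (frame sink) and to the audio output (sound sink).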
video_widget.show()
media_obj.play()
print "Play media"
app.exec_()
|
{
"content_hash": "d1767257060755d2efc13aaba7b1bf58",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 56,
"avg_line_length": 24.16,
"alnum_prop": 0.6821192052980133,
"repo_name": "CospanDesign/python",
"id": "a956ea08385b11f00955ba1f0a2730646228bae5",
"size": "604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyqt/getting_started/phonon_example3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "124288"
},
{
"name": "C++",
"bytes": "7418"
},
{
"name": "Jupyter Notebook",
"bytes": "802"
},
{
"name": "Makefile",
"bytes": "1265"
},
{
"name": "Python",
"bytes": "907010"
},
{
"name": "SWIG",
"bytes": "321"
},
{
"name": "Shell",
"bytes": "8831"
},
{
"name": "Verilog",
"bytes": "267332"
}
],
"symlink_target": ""
}
|
'''
The service module for OpenBSD
'''
# Import python libs
from __future__ import absolute_import
import os
import logging
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext.six.moves import map # pylint: disable=import-error,redefined-builtin
# Import Salt libs
import salt.utils
log = logging.getLogger(__name__)
# XXX enable/disable support would be nice
# Define the module's virtual name
__virtualname__ = 'service'
__func_alias__ = {
'reload_': 'reload'
}
def __virtual__():
'''
Only work on OpenBSD
'''
if __grains__['os'] == 'OpenBSD' and os.path.exists('/etc/rc.d/rc.subr'):
        krel = list(map(int, __grains__['kernelrelease'].split('.')))
        # The -f flag, used to force a script to run even if disabled,
        # was added after the 5.0 release.
        # The rcctl(8) command is the preferred way to manage services.
if krel[0] > 5 or (krel[0] == 5 and krel[1] > 0):
if not os.path.exists('/usr/sbin/rcctl'):
return __virtualname__
return False
def start(name):
'''
Start the specified service
CLI Example:
.. code-block:: bash
salt '*' service.start <service name>
'''
cmd = '/etc/rc.d/{0} -f start'.format(name)
return not __salt__['cmd.retcode'](cmd)
def stop(name):
'''
Stop the specified service
CLI Example:
.. code-block:: bash
salt '*' service.stop <service name>
'''
cmd = '/etc/rc.d/{0} -f stop'.format(name)
return not __salt__['cmd.retcode'](cmd)
def restart(name):
'''
Restart the named service
CLI Example:
.. code-block:: bash
salt '*' service.restart <service name>
'''
cmd = '/etc/rc.d/{0} -f restart'.format(name)
return not __salt__['cmd.retcode'](cmd)
def status(name, sig=None):
'''
    Return the status for a service: returns a bool indicating whether the
    service is running.
CLI Example:
.. code-block:: bash
salt '*' service.status <service name>
'''
if sig:
return bool(__salt__['status.pid'](sig))
cmd = '/etc/rc.d/{0} -f check'.format(name)
return not __salt__['cmd.retcode'](cmd)
def reload_(name):
'''
.. versionadded:: 2014.7.0
Reload the named service
CLI Example:
.. code-block:: bash
salt '*' service.reload <service name>
'''
cmd = '/etc/rc.d/{0} -f reload'.format(name)
return not __salt__['cmd.retcode'](cmd)
import re
service_flags_regex = re.compile(r'^\s*(\w[\d\w]*)_flags=(?:(NO)|.*)$')
pkg_scripts_regex = re.compile(r'^\s*pkg_scripts=\'(.*)\'$')
start_daemon_call_regex = re.compile(r'(\s*start_daemon(?!\(\)))')
start_daemon_parameter_regex = re.compile(r'(?:\s+(\w[\w\d]*))')
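# Illustrative matches for the regexes above (sketch, not in the original):
#   service_flags_regex:  "httpd_flags=NO" -> group(1)='httpd', group(2)='NO'
#                         "ntpd_flags=-s"  -> group(1)='ntpd',  group(2)=None
#   pkg_scripts_regex:    "pkg_scripts='messagebus cupsd'"
#                         -> group(1)='messagebus cupsd'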
def _get_rc():
'''
Returns a dict where the key is the daemon's name and
the value a boolean indicating its status (True: enabled or False: disabled).
Check the daemons started by the system in /etc/rc and
configured in /etc/rc.conf and /etc/rc.conf.local.
    Also add to the dict all the locally enabled daemons via $pkg_scripts.
'''
daemons_flags = {}
try:
# now read the system startup script /etc/rc
# to know what are the system enabled daemons
with salt.utils.fopen('/etc/rc', 'r') as handle:
lines = handle.readlines()
except IOError:
log.error('Unable to read /etc/rc')
else:
for line in lines:
match = start_daemon_call_regex.match(line)
if match:
# the matched line is a call to start_daemon()
# we remove the function name
line = line[len(match.group(1)):]
# we retrieve each daemon name from the parameters of start_daemon()
for daemon in start_daemon_parameter_regex.findall(line):
# mark it as enabled
daemons_flags[daemon] = True
# this will execute rc.conf and rc.conf.local
# used in /etc/rc at boot to start the daemons
variables = __salt__['cmd.run']('(. /etc/rc.conf && set)',
clean_env=True,
output_loglevel='quiet',
python_shell=True).split('\n')
for var in variables:
match = service_flags_regex.match(var)
if match:
            # the matched var looks like daemon_name_flags=; we test its assigned value
# NO: disabled, everything else: enabled
# do not create a new key if the service hasn't been found in /etc/rc, see $pkg_scripts
if match.group(2) == 'NO':
daemons_flags[match.group(1)] = False
else:
match = pkg_scripts_regex.match(var)
if match:
# the matched var is pkg_scripts
                # we can retrieve the name of each locally enabled daemon that wasn't hand-started via /etc/rc
for daemon in match.group(1).split():
# create a new key and mark it as enabled
daemons_flags[daemon] = True
return daemons_flags
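# Illustrative return value of _get_rc() (hypothetical daemon names):
#     {'ntpd': True, 'httpd': False, 'cupsd': True}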
def available(name):
'''
.. versionadded:: 2014.7.0
Returns ``True`` if the specified service is available, otherwise returns
``False``.
CLI Example:
.. code-block:: bash
salt '*' service.available sshd
'''
path = '/etc/rc.d/{0}'.format(name)
return os.path.isfile(path) and os.access(path, os.X_OK)
def missing(name):
'''
.. versionadded:: 2014.7.0
The inverse of service.available.
Returns ``True`` if the specified service is not available, otherwise returns
``False``.
CLI Example:
.. code-block:: bash
salt '*' service.missing sshd
'''
return not available(name)
def get_all():
'''
.. versionadded:: 2014.7.0
Return all available boot services
CLI Example:
.. code-block:: bash
salt '*' service.get_all
'''
services = []
if not os.path.isdir('/etc/rc.d'):
return services
for service in os.listdir('/etc/rc.d'):
# this will remove rc.subr and all non executable files
if available(service):
services.append(service)
return sorted(services)
def get_enabled():
'''
.. versionadded:: 2014.7.0
Return a list of service that are enabled on boot
CLI Example:
.. code-block:: bash
salt '*' service.get_enabled
'''
services = []
for daemon, is_enabled in six.iteritems(_get_rc()):
if is_enabled:
services.append(daemon)
return sorted(set(get_all()) & set(services))
def enabled(name, **kwargs):
'''
.. versionadded:: 2014.7.0
Return True if the named service is enabled, false otherwise
CLI Example:
.. code-block:: bash
salt '*' service.enabled <service name>
'''
return name in get_enabled()
def get_disabled():
'''
.. versionadded:: 2014.7.0
Return a set of services that are installed but disabled
CLI Example:
.. code-block:: bash
salt '*' service.get_disabled
'''
services = []
for daemon, is_enabled in six.iteritems(_get_rc()):
if not is_enabled:
services.append(daemon)
return sorted(set(get_all()) & set(services))
def disabled(name):
'''
.. versionadded:: 2014.7.0
Return True if the named service is disabled, false otherwise
CLI Example:
.. code-block:: bash
salt '*' service.disabled <service name>
'''
return name in get_disabled()
|
{
"content_hash": "a14d0d5f9f86f45493d1b02915ae248d",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 109,
"avg_line_length": 25.14569536423841,
"alnum_prop": 0.5800632077956281,
"repo_name": "smallyear/linuxLearn",
"id": "470a414135481ed755fbdc36391845fc8a088bc1",
"size": "7618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "salt/salt/modules/openbsdservice.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "269"
},
{
"name": "CSS",
"bytes": "35"
},
{
"name": "HTML",
"bytes": "23373"
},
{
"name": "JavaScript",
"bytes": "510"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "12800734"
},
{
"name": "Shell",
"bytes": "240576"
}
],
"symlink_target": ""
}
|
from portality.util import url_for
from portality.core import app
from portality.events.consumer import EventConsumer
from portality import constants
from portality import models
from portality.bll import DOAJ, exceptions
from portality.lib.seamless import SeamlessException
from portality.lib.dates import human_date
class ApplicationPublisherInprogressNotify(EventConsumer):
ID = "application:publisher:inprogress:notify"
@classmethod
def consumes(cls, event):
return event.id == constants.EVENT_APPLICATION_STATUS and \
event.context.get("application") is not None and \
event.context.get("old_status") == constants.APPLICATION_STATUS_PENDING and \
event.context.get("new_status") == constants.APPLICATION_STATUS_IN_PROGRESS
@classmethod
def consume(cls, event):
app_source = event.context.get("application")
try:
application = models.Application(**app_source)
except SeamlessException as e:
raise exceptions.NoSuchObjectException("Unable to construct Application from supplied source - data structure validation error, {x}".format(x=e))
if application.owner is None:
return
svc = DOAJ.notificationsService()
notification = models.Notification()
notification.who = application.owner
notification.created_by = cls.ID
notification.classification = constants.NOTIFICATION_CLASSIFICATION_STATUS_CHANGE
title = application.bibjson().title
date_applied = human_date(application.date_applied)
volunteers = app.config.get("BASE_URL") + url_for("doaj.volunteers")
notification.long = svc.long_notification(cls.ID).format(
title=title,
date_applied=date_applied,
volunteers=volunteers
)
notification.short = svc.short_notification(cls.ID)
svc.notify(notification)
|
{
"content_hash": "13ae4faf1dcfdd93a963112c19b929ab",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 157,
"avg_line_length": 38.8,
"alnum_prop": 0.6938144329896907,
"repo_name": "DOAJ/doaj",
"id": "a8df5948f3911137d5720023ea82c94e0d6af935",
"size": "1968",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "portality/events/consumers/application_publisher_inprogress_notify.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2399"
},
{
"name": "Dockerfile",
"bytes": "59"
},
{
"name": "HTML",
"bytes": "483733"
},
{
"name": "JavaScript",
"bytes": "952971"
},
{
"name": "Jinja",
"bytes": "15292"
},
{
"name": "Python",
"bytes": "3195030"
},
{
"name": "SCSS",
"bytes": "75276"
},
{
"name": "Shell",
"bytes": "28415"
}
],
"symlink_target": ""
}
|
from .sgd_separator import plot_sgd_separator
from .linear_regression import plot_linear_regression
from .ML_flow_chart import plot_supervised_chart, plot_unsupervised_chart
from .helpers import plot_iris_classification
|
{
"content_hash": "41b6f0277f0431a8c0ff4015279c175c",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 73,
"avg_line_length": 55,
"alnum_prop": 0.8363636363636363,
"repo_name": "jljones/sklearn_pycon2014",
"id": "0723d0d0f09041cc3d9bf01706579f673b32d8f4",
"size": "220",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "notebooks/fig_code/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "25856"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
import os, sys, glob, fnmatch
setup(name="SimpleCV",
      version='2.0',
download_url='https://github.com/sightmachine/SimpleCV/zipball/1.3',
description="Make Computers See with SimpleCV, the Python Framework for Machine Vision",
long_description="""Framework for computer (machine) vision in Python, providing a unified, pythonic interface to image acquisition, conversion, manipulation, and feature extraction.""",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Intended Audience :: Manufacturing',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Multimedia :: Graphics :: Capture :: Digital Camera',
'Topic :: Multimedia :: Graphics :: Graphics Conversion',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Software Development :: Libraries :: Python Modules'],
keywords='opencv, cv, machine vision, computer vision, image recognition, kinect, freenect',
author='Sight Machine Inc',
author_email='support@sightmachine.com',
url='http://simplecv.org',
license='BSD',
packages = find_packages(exclude=['ez_setup']),
zip_safe = False,
requires=['cv2','cv', 'numpy', 'scipy', 'pygame', 'pil'],
package_data = { #DO NOT REMOVE, NEEDED TO LOAD INLINE FILES i = Image('simplecv')
'SimpleCV': ['Display/Notebook/template.html',
'Display/Gtk/main.glade',
'sampleimages/*',
'Features/HaarCascades/*',
                      'Features/FaceRecognizerData/*',
'examples/arduino/*',
'examples/detection/*',
'examples/display/*',
'examples/kinect/*',
'examples/machine-learning/*',
'examples/manipulation/*',
'examples/tracking/*'
],
},
entry_points={
'console_scripts': [
'simplecv = SimpleCV.Shell:main',
],
},
)
|
{
"content_hash": "cf022509604d01c1b05f9e5bbe58115a",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 188,
"avg_line_length": 43.96,
"alnum_prop": 0.6005459508644222,
"repo_name": "jayrambhia/SimpleCV2",
"id": "cdf359bb92cdd6f00b7f261a83435aaf5659bfe5",
"size": "2198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "46344"
},
{
"name": "JavaScript",
"bytes": "41038"
},
{
"name": "Perl",
"bytes": "5044"
},
{
"name": "Python",
"bytes": "1698883"
},
{
"name": "Shell",
"bytes": "18995"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cparte', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='challenge',
name='accept_changes',
field=models.BooleanField(default=True),
#preserve_default=True,
),
]
|
{
"content_hash": "e7432a44742e563b8d15451c92f9a633",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 52,
"avg_line_length": 21.736842105263158,
"alnum_prop": 0.5883777239709443,
"repo_name": "joausaga/participa",
"id": "ff07da5c177c79de739bb9a1e5e82fc478fc21d7",
"size": "437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cparte/migrations/0002_challenge_accept_changes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1300"
},
{
"name": "Python",
"bytes": "123320"
}
],
"symlink_target": ""
}
|
import ast
import sys
try:
    from unqlite import UnQLite
except ImportError:
    sys.exit("[!] Install the UnQlite library: pip install unqlite")
from utils import Utils
class KeyStore(object):
db = UnQLite()
# =================================================
    # "private" methods
# =================================================
# get the list of values for a given key
@staticmethod
def _get(item):
item = item.rstrip('/')
values = list()
# does the request contain a wild card value?
if "/*/" in item:
parts = item.split("*")
left = parts[0].split()[-1]
right = parts[1].split()[0] if parts[1].split() else ''
temp_vals = KeyStore.get(left)
if (isinstance(temp_vals, basestring)):
temp_vals = ast.literal_eval(temp_vals)
for temp_val in temp_vals:
if left + temp_val + right in KeyStore.db:
values.append(temp_val)
else:
if item in KeyStore.db:
values = KeyStore.db[item]
return values
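    # Illustrative wildcard lookup (sketch): with "host/1.1.1.1/port/80" and
    # "host/2.2.2.2/port/80" stored, _get("host/*/port/80") expands the "*"
    # against the values stored under "host" and returns ['1.1.1.1', '2.2.2.2'].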
# =================================================
# "public" methods
# =================================================
# Set a new value within the keystore
@staticmethod
def add(item):
item = item.rstrip('/')
if (item not in KeyStore.db):
KeyStore.db[item] = list()
if (item.count('/') > 0):
(key, value) = item.rsplit('/', 1)
values = list()
if key in KeyStore.db:
values = KeyStore._get(key)
if (isinstance(values, basestring)):
values = ast.literal_eval(values)
if value not in values:
values.append(value)
KeyStore.db[key] = values
KeyStore.add(key)
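    # Illustrative decomposition (sketch): add("host/1.2.3.4/port/80") also
    # stores "host" -> ['1.2.3.4'], "host/1.2.3.4" -> ['port'] and
    # "host/1.2.3.4/port" -> ['80'] via the recursive call above.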
# return a list of values for a given key
@staticmethod
def get(*items):
result = list()
for item in items:
r2 = KeyStore._get(item)
if (isinstance(r2, basestring)):
r2 = ast.literal_eval(r2)
result += r2
if result:
return sorted(set(result))
return []
# remove a given key or value
@staticmethod
def rm(key):
return
# print out current KB
@staticmethod
def debug():
with KeyStore.db.cursor() as cursor:
for key, value in cursor:
print key, '=>', value
return
# dump keystore to text
@staticmethod
def dump():
dump = ""
with KeyStore.db.cursor() as cursor:
for key, values in cursor:
values = ast.literal_eval(values)
for value in values:
dump += "\n" + key + "/" + value
return dump
# save keystore to file
@staticmethod
def save(filename):
Utils.writeFile(KeyStore.dump(), filename)
return
# load keystore from file
@staticmethod
def load(filename):
lines = Utils.readFile(filename)
for line in lines:
KeyStore.add(line)
return
# -----------------------------------------------------------------------------
# main test code
# -----------------------------------------------------------------------------
if __name__ == "__main__":
print "-------------------------------------------------------------------"
# KeyStore.add("host/1.2.3.4/port/111")
# KeyStore.add("host/a.b.c.d/port/80")
# KeyStore.add("host/a.b.c.d/port/80/bob")
# KeyStore.add("host/a.b.c.d/port/80/apple")
# KeyStore.add("host/a.b.c.d/port")
# KeyStore.add("host/a.b.c.d/port/443")
KeyStore.add("host/1.1.1.1/port/80")
KeyStore.add("host/1.1.1.1/port/8080")
KeyStore.add("host/2.2.2.2/port/443")
KeyStore.add("host/2.2.2.2/port/80")
KeyStore.add("host/3.3.3.3/port/22")
KeyStore.add("host/4.4.4.4/port/25")
print "-------------------------------------------------------------------"
#KeyStore.debug()
#print KeyStore.dump()
print KeyStore.get("host/*/port/80")
    print KeyStore.get("host/2.2.2.2/port", "host/1.1.1.1/port")
#print KeyStore.get("host")
# KeyStore.add("service/http/host/1.1.1.1/tcpport/80/product/apache/version/1.1.1.1.1.1.1")
# KeyStore.add("service/http/host/1.1.1.1/tcpport/8080/product/apache/version/1.1.1.3.3.3.3")
# KeyStore.add("service/https/host/2.2.2.2/tcpport/443/product/nginx/version/a.b.c.d")
# KeyStore.add("service/http/host/2.2.2.2/tcpport/80/product/nginx/version/a.b.c.d")
# KeyStore.add("service/ssh/host/3.3.3.3/tcpport/22/product/openssh/version/q.w.e")
# KeyStore.add("service/smtp/host/4.4.4.4/tcpport/25/product/sendmail/version/9.8.7.6")
# print "-------------------------------------------------------------------"
# KeyStore.debug()
# print KeyStore.get("service")
|
{
"content_hash": "40ad245df8cdc1b3c82dbcafed7096b6",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 96,
"avg_line_length": 33.49324324324324,
"alnum_prop": 0.49041759128505147,
"repo_name": "MooseDojo/apt2",
"id": "2a4f70b8f1c2c240596a2062b4ec3e764b2bd075",
"size": "4957",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/keystore.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "2069"
},
{
"name": "Python",
"bytes": "227220"
},
{
"name": "Shell",
"bytes": "593"
}
],
"symlink_target": ""
}
|
"""Common trace code generation."""
# Adjust path
import os.path
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import itertools
import specs.stdapi as stdapi
def getWrapperInterfaceName(interface):
return "Wrap" + interface.expr
debug = False
class ComplexValueSerializer(stdapi.OnceVisitor):
    '''Type visitor which generates serialization functions for
complex types.
Simple types are serialized inline.
'''
def __init__(self, serializer):
stdapi.OnceVisitor.__init__(self)
self.serializer = serializer
def visitVoid(self, literal):
pass
def visitLiteral(self, literal):
pass
def visitString(self, string):
pass
def visitConst(self, const):
self.visit(const.type)
def visitStruct(self, struct):
# Write array with structure's member names
numMembers = len(struct.members)
if numMembers:
# Ensure member array has nonzero length to avoid MSVC error C2466
memberNames = '_struct%s_members' % (struct.tag,)
print 'static const char * %s[%u] = {' % (memberNames, numMembers)
for type, name, in struct.members:
if name is None:
print ' "",'
else:
print ' "%s",' % (name,)
print '};'
else:
sys.stderr.write('warning: %s has no members\n' % struct.name)
memberNames = 'nullptr'
# Write structure's signature
print 'static const trace::StructSig _struct%s_sig = {' % (struct.tag,)
if struct.name is None:
structName = '""'
else:
structName = '"%s"' % struct.name
print ' %u, %s, %u, %s' % (struct.id, structName, numMembers, memberNames)
print '};'
print
def visitArray(self, array):
self.visit(array.type)
def visitAttribArray(self, array):
pass
def visitBlob(self, array):
pass
def visitEnum(self, enum):
print 'static const trace::EnumValue _enum%s_values[] = {' % (enum.tag)
for value in enum.values:
print ' {"%s", %s},' % (value, value)
print '};'
print
print 'static const trace::EnumSig _enum%s_sig = {' % (enum.tag)
print ' %u, %u, _enum%s_values' % (enum.id, len(enum.values), enum.tag)
print '};'
print
def visitBitmask(self, bitmask):
print 'static const trace::BitmaskFlag _bitmask%s_flags[] = {' % (bitmask.tag)
for value in bitmask.values:
print ' {"%s", %s},' % (value, value)
print '};'
print
print 'static const trace::BitmaskSig _bitmask%s_sig = {' % (bitmask.tag)
print ' %u, %u, _bitmask%s_flags' % (bitmask.id, len(bitmask.values), bitmask.tag)
print '};'
print
def visitPointer(self, pointer):
self.visit(pointer.type)
def visitIntPointer(self, pointer):
pass
def visitObjPointer(self, pointer):
self.visit(pointer.type)
def visitLinearPointer(self, pointer):
self.visit(pointer.type)
def visitHandle(self, handle):
self.visit(handle.type)
def visitReference(self, reference):
self.visit(reference.type)
def visitAlias(self, alias):
self.visit(alias.type)
def visitOpaque(self, opaque):
pass
def visitInterface(self, interface):
pass
def visitPolymorphic(self, polymorphic):
if not polymorphic.contextLess:
return
print 'static void _write__%s(int selector, %s const & value) {' % (polymorphic.tag, polymorphic.expr)
print ' switch (selector) {'
for cases, type in polymorphic.iterSwitch():
for case in cases:
print ' %s:' % case
self.serializer.visit(type, '(%s)(value)' % (type,))
print ' break;'
print ' }'
print '}'
print
class ValueSerializer(stdapi.Visitor, stdapi.ExpanderMixin):
'''Visitor which generates code to serialize any type.
Simple types are serialized inline here, whereas the serialization of
complex types is dispatched to the serialization functions generated by
    the ComplexValueSerializer visitor above.
'''
def visitLiteral(self, literal, instance):
print ' trace::localWriter.write%s(%s);' % (literal.kind, instance)
def visitString(self, string, instance):
if not string.wide:
cast = 'const char *'
suffix = 'String'
else:
cast = 'const wchar_t *'
suffix = 'WString'
if cast != string.expr:
# reinterpret_cast is necessary for GLubyte * <=> char *
instance = 'reinterpret_cast<%s>(%s)' % (cast, instance)
if string.length is not None:
length = ', %s' % self.expand(string.length)
else:
length = ''
print ' trace::localWriter.write%s(%s%s);' % (suffix, instance, length)
def visitConst(self, const, instance):
self.visit(const.type, instance)
def visitStruct(self, struct, instance):
print ' trace::localWriter.beginStruct(&_struct%s_sig);' % (struct.tag,)
for member in struct.members:
self.visitMember(member, instance)
print ' trace::localWriter.endStruct();'
def visitArray(self, array, instance):
length = '_c' + array.type.tag
index = '_i' + array.type.tag
array_length = self.expand(array.length)
print ' if (%s) {' % instance
print ' size_t %s = %s > 0 ? %s : 0;' % (length, array_length, array_length)
print ' trace::localWriter.beginArray(%s);' % length
print ' for (size_t %s = 0; %s < %s; ++%s) {' % (index, index, length, index)
print ' trace::localWriter.beginElement();'
self.visitElement(index, array.type, '(%s)[%s]' % (instance, index))
print ' trace::localWriter.endElement();'
print ' }'
print ' trace::localWriter.endArray();'
print ' } else {'
print ' trace::localWriter.writeNull();'
print ' }'
def visitAttribArray(self, array, instance):
# For each element, decide if it is a key or a value (which depends on the previous key).
# If it is a value, store it as the right type - usually int, some bitfield, or some enum.
# It is currently assumed that an unknown key means that it is followed by an int value.
        # determine the array length which must be passed to beginArray() up front
count = '_c' + array.baseType.tag
print ' {'
print ' int %s;' % count
print ' for (%(c)s = 0; %(array)s && %(array)s[%(c)s] != %(terminator)s; %(c)s += 2) {' \
% {'c': count, 'array': instance, 'terminator': array.terminator}
if array.hasKeysWithoutValues:
print ' switch (int(%(array)s[%(c)s])) {' % {'array': instance, 'c': count}
for key, valueType in array.valueTypes:
if valueType is None:
print ' case %s:' % key
print ' %s--;' % count # the next value is a key again and checked if it's the terminator
print ' break;'
print ' }'
print ' }'
print ' %(c)s += %(array)s ? 1 : 0;' % {'c': count, 'array': instance}
print ' trace::localWriter.beginArray(%s);' % count
# for each key / key-value pair write the key and the value, if the key requires one
index = '_i' + array.baseType.tag
print ' for (int %(i)s = 0; %(i)s < %(count)s; %(i)s++) {' % {'i': index, 'count': count}
print ' trace::localWriter.beginElement();'
self.visit(array.baseType, "%(array)s[%(i)s]" % {'array': instance, 'i': index})
print ' trace::localWriter.endElement();'
print ' if (%(i)s + 1 >= %(count)s) {' % {'i': index, 'count': count}
print ' break;'
print ' }'
print ' switch (int(%(array)s[%(i)s++])) {' % {'array': instance, 'i': index}
# write generic value the usual way
for key, valueType in array.valueTypes:
if valueType is not None:
print ' case %s:' % key
print ' trace::localWriter.beginElement();'
self.visitElement(index, valueType, '(%(array)s)[%(i)s]' % {'array': instance, 'i': index})
print ' trace::localWriter.endElement();'
print ' break;'
# known key with no value, just decrease the index so we treat the next value as a key
if array.hasKeysWithoutValues:
for key, valueType in array.valueTypes:
if valueType is None:
print ' case %s:' % key
print ' %s--;' % index
print ' break;'
# unknown key, write an int value
print ' default:'
print ' trace::localWriter.beginElement();'
print ' os::log("apitrace: warning: %s: unknown key 0x%04X, interpreting value as int\\n", ' + \
'__FUNCTION__, int(%(array)s[%(i)s - 1]));' % {'array': instance, 'i': index}
print ' trace::localWriter.writeSInt(%(array)s[%(i)s]);' % {'array': instance, 'i': index}
print ' trace::localWriter.endElement();'
print ' break;'
print ' }'
print ' }'
print ' trace::localWriter.endArray();'
print ' }'
def visitBlob(self, blob, instance):
print ' trace::localWriter.writeBlob(%s, %s);' % (instance, self.expand(blob.size))
def visitEnum(self, enum, instance):
print ' trace::localWriter.writeEnum(&_enum%s_sig, %s);' % (enum.tag, instance)
def visitBitmask(self, bitmask, instance):
print ' trace::localWriter.writeBitmask(&_bitmask%s_sig, %s);' % (bitmask.tag, instance)
def visitPointer(self, pointer, instance):
print ' if (%s) {' % instance
print ' trace::localWriter.beginArray(1);'
print ' trace::localWriter.beginElement();'
self.visit(pointer.type, "*" + instance)
print ' trace::localWriter.endElement();'
print ' trace::localWriter.endArray();'
print ' } else {'
print ' trace::localWriter.writeNull();'
print ' }'
def visitIntPointer(self, pointer, instance):
print ' trace::localWriter.writePointer((uintptr_t)%s);' % instance
def visitObjPointer(self, pointer, instance):
print ' trace::localWriter.writePointer((uintptr_t)%s);' % instance
def visitLinearPointer(self, pointer, instance):
print ' trace::localWriter.writePointer((uintptr_t)%s);' % instance
def visitReference(self, reference, instance):
self.visit(reference.type, instance)
def visitHandle(self, handle, instance):
self.visit(handle.type, instance)
def visitAlias(self, alias, instance):
self.visit(alias.type, instance)
def visitOpaque(self, opaque, instance):
print ' trace::localWriter.writePointer((uintptr_t)%s);' % instance
def visitInterface(self, interface, instance):
assert False
def visitPolymorphic(self, polymorphic, instance):
if polymorphic.contextLess:
print ' _write__%s(%s, %s);' % (polymorphic.tag, polymorphic.switchExpr, instance)
else:
switchExpr = self.expand(polymorphic.switchExpr)
print ' switch (%s) {' % switchExpr
for cases, type in polymorphic.iterSwitch():
for case in cases:
print ' %s:' % case
caseInstance = instance
if type.expr is not None:
caseInstance = 'static_cast<%s>(%s)' % (type, caseInstance)
self.visit(type, caseInstance)
print ' break;'
if polymorphic.defaultType is None:
print r' default:'
print r' os::log("apitrace: warning: %%s: unexpected polymorphic case %%i\n", __FUNCTION__, (int)%s);' % (switchExpr,)
print r' trace::localWriter.writeNull();'
print r' break;'
print ' }'
class WrapDecider(stdapi.Traverser):
    '''Type visitor which decides whether a given type needs wrapping or not.
    For complex types (arrays, structures), we need to know this beforehand.
'''
def __init__(self):
self.needsWrapping = False
def visitLinearPointer(self, void):
pass
def visitObjPointer(self, interface):
self.needsWrapping = True
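# Note: WrapDecider is driven by Tracer.needsWrapping() below -- a type needs
# wrapping iff an object (interface) pointer occurs somewhere inside it.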
class ValueWrapper(stdapi.Traverser, stdapi.ExpanderMixin):
    '''Type visitor which will generate the code to wrap an instance.
    Wrapping is necessary mostly for interfaces; however, interface pointers
    can appear anywhere inside complex types.
'''
def visitStruct(self, struct, instance):
for member in struct.members:
self.visitMember(member, instance)
def visitArray(self, array, instance):
array_length = self.expand(array.length)
print " if (%s) {" % instance
print " for (size_t _i = 0, _s = %s; _i < _s; ++_i) {" % array_length
self.visitElement('_i', array.type, instance + "[_i]")
print " }"
print " }"
def visitPointer(self, pointer, instance):
print " if (%s) {" % instance
self.visit(pointer.type, "*" + instance)
print " }"
def visitObjPointer(self, pointer, instance):
elem_type = pointer.type.mutable()
if isinstance(elem_type, stdapi.Interface):
self.visitInterfacePointer(elem_type, instance)
elif isinstance(elem_type, stdapi.Alias) and isinstance(elem_type.type, stdapi.Interface):
self.visitInterfacePointer(elem_type.type, instance)
else:
# All interfaces should at least implement IUnknown
print " WrapIUnknown::_wrap(__FUNCTION__, (IUnknown **) &%s);" % (instance,)
def visitInterface(self, interface, instance):
raise NotImplementedError
def visitInterfacePointer(self, interface, instance):
print " Wrap%s::_wrap(__FUNCTION__, &%s);" % (interface.name, instance)
def visitPolymorphic(self, type, instance):
# XXX: There might be polymorphic values that need wrapping in the future
raise NotImplementedError
class ValueUnwrapper(ValueWrapper):
'''Reverse of ValueWrapper.'''
allocated = False
def visitStruct(self, struct, instance):
if not self.allocated:
            # Argument is constant. We need to create a non-const copy.
print ' {'
print " %s * _t = static_cast<%s *>(alloca(sizeof *_t));" % (struct, struct)
print ' *_t = %s;' % (instance,)
assert instance.startswith('*')
print ' %s = _t;' % (instance[1:],)
instance = '*_t'
self.allocated = True
try:
return ValueWrapper.visitStruct(self, struct, instance)
finally:
print ' }'
else:
return ValueWrapper.visitStruct(self, struct, instance)
def visitArray(self, array, instance):
if self.allocated or isinstance(instance, stdapi.Interface):
return ValueWrapper.visitArray(self, array, instance)
array_length = self.expand(array.length)
elem_type = array.type.mutable()
print " if (%s && %s) {" % (instance, array_length)
print " %s * _t = static_cast<%s *>(alloca(%s * sizeof *_t));" % (elem_type, elem_type, array_length)
print " for (size_t _i = 0, _s = %s; _i < _s; ++_i) {" % array_length
print " _t[_i] = %s[_i];" % instance
self.allocated = True
self.visit(array.type, "_t[_i]")
print " }"
print " %s = _t;" % instance
print " }"
def visitInterfacePointer(self, interface, instance):
print r' Wrap%s::_unwrap(__FUNCTION__, &%s);' % (interface.name, instance)
def _getInterfaceHierarchy(allIfaces, baseIface, result):
for iface in allIfaces:
if iface.base is baseIface:
_getInterfaceHierarchy(allIfaces, iface, result)
result.append(iface)
def getInterfaceHierarchy(allIfaces, baseIface):
result = []
_getInterfaceHierarchy(allIfaces, baseIface, result)
return result
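# Illustrative example (hypothetical interfaces): given A, B(A), C(B),
# getInterfaceHierarchy([A, B, C], A) returns [C, B] -- most-derived
# interfaces first; the base interface itself is not included.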
class Tracer:
'''Base class to orchestrate the code generation of API tracing.'''
# 0-3 are reserved to memcpy, malloc, free, and realloc
__id = 4
def __init__(self):
self.api = None
def serializerFactory(self):
'''Create a serializer.
        Can be overridden by derived classes to inject their own serializer.
'''
return ValueSerializer()
def traceApi(self, api):
self.api = api
self.header(api)
# Includes
for module in api.modules:
for header in module.headers:
print header
print
# Generate the serializer functions
types = api.getAllTypes()
visitor = ComplexValueSerializer(self.serializerFactory())
map(visitor.visit, types)
print
# Interfaces wrapers
self.traceInterfaces(api)
# Function wrappers
self.interface = None
self.base = None
for function in api.getAllFunctions():
self.traceFunctionDecl(function)
for function in api.getAllFunctions():
try:
self.traceFunctionImpl(function)
except:
sys.stderr.write("error: %s: exception\n" % function.name)
raise
print
self.footer(api)
def header(self, api):
print '#ifdef _WIN32'
print '# include <malloc.h> // alloca'
print '# ifndef alloca'
print '# define alloca _alloca'
print '# endif'
print '#else'
print '# include <alloca.h> // alloca'
print '#endif'
print
print
print 'static std::map<void *, void *> g_WrappedObjects;'
def footer(self, api):
pass
def traceFunctionDecl(self, function):
# Per-function declarations
if not function.internal:
if function.args:
print 'static const char * _%s_args[%u] = {%s};' % (function.name, len(function.args), ', '.join(['"%s"' % arg.name for arg in function.args]))
else:
print 'static const char ** _%s_args = NULL;' % (function.name,)
print 'static const trace::FunctionSig _%s_sig = {%u, "%s", %u, _%s_args};' % (function.name, self.getFunctionSigId(), function.sigName(), len(function.args), function.name)
print
def getFunctionSigId(self):
id = Tracer.__id
Tracer.__id += 1
return id
def isFunctionPublic(self, function):
return True
def traceFunctionImpl(self, function):
if self.isFunctionPublic(function):
print 'extern "C" PUBLIC'
else:
print 'extern "C" PRIVATE'
print function.prototype() + ' {'
if function.type is not stdapi.Void:
print ' %s _result;' % function.type
for arg in function.args:
if not arg.output:
self.unwrapArg(function, arg)
self.traceFunctionImplBody(function)
# XXX: wrapping should go here, but before we can do that we'll need to protect g_WrappedObjects with its own mutex
if function.type is not stdapi.Void:
print ' return _result;'
print '}'
print
def traceFunctionImplBody(self, function):
if not function.internal:
print ' unsigned _call = trace::localWriter.beginEnter(&_%s_sig);' % (function.name,)
for arg in function.args:
if not arg.output:
self.serializeArg(function, arg)
print ' trace::localWriter.endEnter();'
self.invokeFunction(function)
if not function.internal:
print ' trace::localWriter.beginLeave(_call);'
print ' if (%s) {' % self.wasFunctionSuccessful(function)
for arg in function.args:
if arg.output:
self.serializeArg(function, arg)
self.wrapArg(function, arg)
print ' }'
if function.type is not stdapi.Void:
self.serializeRet(function, "_result")
if function.type is not stdapi.Void:
self.wrapRet(function, "_result")
print ' trace::localWriter.endLeave();'
def invokeFunction(self, function):
self.doInvokeFunction(function)
def doInvokeFunction(self, function, prefix='_', suffix=''):
        # Same as invokeFunction(), but called both when tracing is enabled and when it is disabled.
if function.type is stdapi.Void:
result = ''
else:
result = '_result = '
dispatch = prefix + function.name + suffix
print ' %s%s(%s);' % (result, dispatch, ', '.join([str(arg.name) for arg in function.args]))
def wasFunctionSuccessful(self, function):
if function.type is stdapi.Void:
return 'true'
if str(function.type) == 'HRESULT':
return 'SUCCEEDED(_result)'
return 'true'
def serializeArg(self, function, arg):
print ' trace::localWriter.beginArg(%u);' % (arg.index,)
self.serializeArgValue(function, arg)
print ' trace::localWriter.endArg();'
def serializeArgValue(self, function, arg):
self.serializeValue(arg.type, arg.name)
def wrapArg(self, function, arg):
assert not isinstance(arg.type, stdapi.ObjPointer)
from specs.winapi import REFIID
riid = None
for other_arg in function.args:
if not other_arg.output and other_arg.type is REFIID:
riid = other_arg
if riid is not None \
and riid.name != 'EmulatedInterface' \
and isinstance(arg.type, stdapi.Pointer) \
and isinstance(arg.type.type, stdapi.ObjPointer):
self.wrapIid(function, riid, arg)
return
self.wrapValue(arg.type, arg.name)
def unwrapArg(self, function, arg):
self.unwrapValue(arg.type, arg.name)
def serializeRet(self, function, instance):
print ' trace::localWriter.beginReturn();'
self.serializeValue(function.type, instance)
print ' trace::localWriter.endReturn();'
def serializeValue(self, type, instance):
serializer = self.serializerFactory()
serializer.visit(type, instance)
def wrapRet(self, function, instance):
self.wrapValue(function.type, instance)
def needsWrapping(self, type):
visitor = WrapDecider()
visitor.visit(type)
return visitor.needsWrapping
def wrapValue(self, type, instance):
if self.needsWrapping(type):
visitor = ValueWrapper()
visitor.visit(type, instance)
def unwrapValue(self, type, instance):
if self.needsWrapping(type):
visitor = ValueUnwrapper()
visitor.visit(type, instance)
def traceInterfaces(self, api):
interfaces = api.getAllInterfaces()
if not interfaces:
return
print r'#include "guids.hpp"'
print
# Helper functions to wrap/unwrap interface pointers
print r'static inline bool'
print r'hasChildInterface(REFIID riid, IUnknown *pUnknown) {'
print r' IUnknown *pObj = NULL;'
print r' HRESULT hr = pUnknown->QueryInterface(riid, (VOID **)&pObj);'
print r' if (FAILED(hr)) {'
print r' return false;'
print r' }'
print r' assert(pObj);'
print r' pObj->Release();'
print r' return pUnknown == pObj;'
print r'}'
print
print r'static inline const void *'
print r'getVtbl(const void *pvObj) {'
print r' return pvObj ? *(const void **)pvObj : NULL;'
print r'}'
print
print r'static void'
print r'warnVtbl(const void *pVtbl) {'
print r' HMODULE hModule = 0;'
print r' BOOL bRet = GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |'
print r' GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,'
print r' (LPCTSTR)pVtbl,'
print r' &hModule);'
print r' assert(bRet);'
print r' if (bRet) {'
print r' char szModule[MAX_PATH];'
print r' DWORD dwRet = GetModuleFileNameA(hModule, szModule, sizeof szModule);'
print r' assert(dwRet);'
print r' if (dwRet) {'
print r' DWORD dwOffset = (UINT_PTR)pVtbl - (UINT_PTR)hModule;'
print r' os::log("apitrace: warning: pVtbl = %p (%s!+0x%0lx)\n", pVtbl, szModule, dwOffset);'
print r' }'
print r' }'
print r'}'
print
map(self.declareWrapperInterface, interfaces)
self.implementIidWrapper(api)
map(self.implementWrapperInterface, interfaces)
print
def declareWrapperInterface(self, interface):
wrapperInterfaceName = getWrapperInterfaceName(interface)
print "class %s : public %s " % (wrapperInterfaceName, interface.name)
print "{"
print "private:"
print " %s(%s * pInstance);" % (wrapperInterfaceName, interface.name)
print " ~%s(); // Not implemented" % wrapperInterfaceName
print "public:"
print " static %s* _create(const char *entryName, %s * pInstance);" % (wrapperInterfaceName, interface.name)
print " static void _wrap(const char *entryName, %s ** ppInstance);" % (interface.name,)
print " static void _unwrap(const char *entryName, %s ** pInstance);" % (interface.name,)
print
methods = list(interface.iterMethods())
for method in methods:
print " " + method.prototype() + " override;"
print
for type, name, value in self.enumWrapperInterfaceVariables(interface):
print ' %s %s;' % (type, name)
print
print r'private:'
print r' void _dummy(unsigned i) const {'
print r' os::log("error: %%s: unexpected virtual method %%i of instance pvObj=%%p pWrapper=%%p pVtbl=%%p\n", "%s", i, m_pInstance, this, m_pVtbl);' % interface.name
print r' warnVtbl(m_pVtbl);'
print r' warnVtbl(getVtbl(m_pInstance));'
print r' trace::localWriter.flush();'
print r' os::abort();'
print r' }'
print
for i in range(len(methods), 64):
print r' virtual void _dummy%i(void) const { _dummy(%i); }' % (i, i)
print
print "};"
print
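# Note (annotation, not upstream code): the _dummy%i virtual methods emitted
# above pad the wrapper's vtable out to 64 slots, so that a call through a
# method index the declared interface does not actually have lands in
# _dummy(), which logs the offending instance/vtable pointers and aborts
# instead of jumping into arbitrary memory.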
def enumWrapperInterfaceVariables(self, interface):
return [
("DWORD", "m_dwMagic", "0xd8365d6c"),
("%s *" % interface.name, "m_pInstance", "pInstance"),
("const void *", "m_pVtbl", "getVtbl(pInstance)"),
("UINT", "m_NumMethods", len(list(interface.iterBaseMethods()))),
]
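# Illustrative sketch (interface name is hypothetical): for an interface IFoo,
# the (type, name, initializer) tuples above become the data members of the
# generated wrapper class, declared by declareWrapperInterface() and assigned
# in the private constructor:
#
#     DWORD m_dwMagic;       // 0xd8365d6c, sanity marker checked on unwrap
#     IFoo *m_pInstance;     // the real, unwrapped COM object
#     const void *m_pVtbl;   // vtable of the real object at wrap time
#     UINT m_NumMethods;     // method count known when the wrapper was built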
def implementWrapperInterface(self, iface):
self.interface = iface
wrapperInterfaceName = getWrapperInterfaceName(iface)
# Private constructor
print '%s::%s(%s * pInstance) {' % (wrapperInterfaceName, wrapperInterfaceName, iface.name)
for type, name, value in self.enumWrapperInterfaceVariables(iface):
if value is not None:
print ' %s = %s;' % (name, value)
print '}'
print
# Public constructor
print '%s *%s::_create(const char *entryName, %s * pInstance) {' % (wrapperInterfaceName, wrapperInterfaceName, iface.name)
print r' Wrap%s *pWrapper = new Wrap%s(pInstance);' % (iface.name, iface.name)
if debug:
print r' os::log("%%s: created %s pvObj=%%p pWrapper=%%p pVtbl=%%p\n", entryName, pInstance, pWrapper, pWrapper->m_pVtbl);' % iface.name
print r' g_WrappedObjects[pInstance] = pWrapper;'
print r' return pWrapper;'
print '}'
print
baseMethods = list(iface.iterBaseMethods())
for base, method in baseMethods:
self.base = base
self.implementWrapperInterfaceMethod(iface, base, method)
print
# Wrap pointer
ifaces = self.api.getAllInterfaces()
print r'void'
print r'%s::_wrap(const char *entryName, %s **ppObj) {' % (wrapperInterfaceName, iface.name)
print r' if (!ppObj) {'
print r' return;'
print r' }'
print r' %s *pObj = *ppObj;' % (iface.name,)
print r' if (!pObj) {'
print r' return;'
print r' }'
print r' assert(hasChildInterface(IID_%s, pObj));' % iface.name
print r' std::map<void *, void *>::const_iterator it = g_WrappedObjects.find(pObj);'
print r' if (it != g_WrappedObjects.end()) {'
print r' Wrap%s *pWrapper = (Wrap%s *)it->second;' % (iface.name, iface.name)
print r' assert(pWrapper);'
print r' assert(pWrapper->m_dwMagic == 0xd8365d6c);'
print r' assert(pWrapper->m_pInstance == pObj);'
print r' if (pWrapper->m_pVtbl == getVtbl(pObj) &&'
print r' pWrapper->m_NumMethods >= %s) {' % len(baseMethods)
if debug:
print r' os::log("%s: fetched pvObj=%p pWrapper=%p pVtbl=%p\n", entryName, pObj, pWrapper, pWrapper->m_pVtbl);'
print r' assert(hasChildInterface(IID_%s, pWrapper->m_pInstance));' % iface.name
print r' *ppObj = pWrapper;'
print r' return;'
print r' } else {'
if debug:
print r' os::log("%s::Release: deleted pvObj=%%p pWrapper=%%p pVtbl=%%p\n", pWrapper->m_pInstance, pWrapper, pWrapper->m_pVtbl);' % iface.name
print r' g_WrappedObjects.erase(pObj);'
print r' }'
print r' }'
for childIface in getInterfaceHierarchy(ifaces, iface):
print r' if (hasChildInterface(IID_%s, pObj)) {' % (childIface.name,)
print r' *ppObj = Wrap%s::_create(entryName, static_cast<%s *>(pObj));' % (childIface.name, childIface.name)
print r' return;'
print r' }'
print r' *ppObj = Wrap%s::_create(entryName, pObj);' % iface.name
print r'}'
print
# Unwrap pointer
print r'void'
print r'%s::_unwrap(const char *entryName, %s **ppObj) {' % (wrapperInterfaceName, iface.name)
print r' if (!ppObj || !*ppObj) {'
print r' return;'
print r' }'
print r' const %s *pWrapper = static_cast<const %s*>(*ppObj);' % (wrapperInterfaceName, getWrapperInterfaceName(iface))
print r' if (pWrapper && pWrapper->m_dwMagic == 0xd8365d6c) {'
print r' *ppObj = pWrapper->m_pInstance;'
print r' } else {'
print r' os::log("apitrace: warning: %%s: unexpected %%s pointer %%p\n", entryName, "%s", *ppObj);' % iface.name
print r' trace::localWriter.flush();'
print r' }'
print r'}'
print
def implementWrapperInterfaceMethod(self, interface, base, method):
wrapperInterfaceName = getWrapperInterfaceName(interface)
print method.prototype(wrapperInterfaceName + '::' + method.name) + ' {'
if False:
print r' os::log("%%s(%%p -> %%p)\n", "%s", this, m_pInstance);' % (wrapperInterfaceName + '::' + method.name)
if method.type is not stdapi.Void:
print ' %s _result;' % method.type
print ' %s *_this = static_cast<%s *>(m_pInstance);' % (base, base)
for arg in method.args:
if not arg.output:
self.unwrapArg(method, arg)
self.implementWrapperInterfaceMethodBody(interface, base, method)
# XXX: wrapping should go here, but before we can do that we'll need to protect g_WrappedObjects with its own mutex
if method.type is not stdapi.Void:
print ' return _result;'
print '}'
print
def implementWrapperInterfaceMethodBody(self, interface, base, method):
assert not method.internal
sigName = interface.name + '::' + method.sigName()
if method.overloaded:
# Once the method signature name goes into a trace, we'll need to
# support it indefinitely, so log overloaded methods to make sure
# nothing weird gets baked in.
sys.stderr.write('note: overloaded method %s\n' % (sigName,))
numArgs = len(method.args) + 1
print ' static const char * _args[%u] = {%s};' % (numArgs, ', '.join(['"this"'] + ['"%s"' % arg.name for arg in method.args]))
print ' static const trace::FunctionSig _sig = {%u, "%s", %u, _args};' % (self.getFunctionSigId(), sigName, numArgs)
print ' unsigned _call = trace::localWriter.beginEnter(&_sig);'
print ' trace::localWriter.beginArg(0);'
print ' trace::localWriter.writePointer((uintptr_t)m_pInstance);'
print ' trace::localWriter.endArg();'
for arg in method.args:
if not arg.output:
self.serializeArg(method, arg)
print ' trace::localWriter.endEnter();'
self.invokeMethod(interface, base, method)
print ' trace::localWriter.beginLeave(_call);'
print ' if (%s) {' % self.wasFunctionSuccessful(method)
for arg in method.args:
if arg.output:
self.serializeArg(method, arg)
self.wrapArg(method, arg)
print ' }'
if method.type is not stdapi.Void:
self.serializeRet(method, '_result')
if method.type is not stdapi.Void:
self.wrapRet(method, '_result')
if method.name == 'Release':
assert method.type is not stdapi.Void
print r' if (!_result) {'
print r' // NOTE: Must not delete the wrapper here. See'
print r' // https://github.com/apitrace/apitrace/issues/462'
print r' }'
print ' trace::localWriter.endLeave();'
def implementIidWrapper(self, api):
ifaces = api.getAllInterfaces()
print r'static void'
print r'warnIID(const char *entryName, REFIID riid, void *pvObj, const char *reason) {'
print r' os::log("apitrace: warning: %s: %s IID %s\n",'
print r' entryName, reason,'
print r' getGuidName(riid));'
print r' const void * pVtbl = getVtbl(pvObj);'
print r' warnVtbl(pVtbl);'
print r'}'
print
print r'static void'
print r'wrapIID(const char *entryName, REFIID riid, void * * ppvObj) {'
print r' if (!ppvObj || !*ppvObj) {'
print r' return;'
print r' }'
for iface in ifaces:
print r' if (riid == IID_%s) {' % (iface.name,)
print r' Wrap%s::_wrap(entryName, (%s **) ppvObj);' % (iface.name, iface.name)
print r' return;'
print r' }'
print r' warnIID(entryName, riid, *ppvObj, "unsupported");'
print r'}'
print
def wrapIid(self, function, riid, out):
# Cast output arg to `void **` if necessary
out_name = out.name
obj_type = out.type.type.type
if obj_type is not stdapi.Void:
assert isinstance(obj_type, stdapi.Interface)
out_name = 'reinterpret_cast<void * *>(%s)' % out_name
print r' if (%s && *%s) {' % (out.name, out.name)
functionName = function.name
else_ = ''
if self.interface is not None:
functionName = self.interface.name + '::' + functionName
print r' if (*%s == m_pInstance &&' % (out_name,)
print r' (%s)) {' % ' || '.join('%s == IID_%s' % (riid.name, iface.name) for iface in self.interface.iterBases())
print r' *%s = this;' % (out_name,)
print r' }'
else_ = 'else '
print r' %s{' % else_
print r' wrapIID("%s", %s, %s);' % (functionName, riid.name, out_name)
print r' }'
print r' }'
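# Illustrative sketch (names are hypothetical): for an interface method that
# returns an object via a REFIID/void** pair (QueryInterface-style), wrapIid()
# above emits roughly:
#
#     if (ppvObj && *ppvObj) {
#         if (*ppvObj == m_pInstance &&
#             (riid == IID_IUnknown || riid == IID_IFoo)) {
#             *ppvObj = this;
#         } else {
#             wrapIID("IFoo::QueryInterface", riid, ppvObj);
#         }
#     }
#
# i.e. re-queries for the wrapped object itself are answered with the wrapper,
# while anything else is routed through the generated wrapIID() dispatcher.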
def invokeMethod(self, interface, base, method):
if method.type is stdapi.Void:
result = ''
else:
result = '_result = '
print ' %s_this->%s(%s);' % (result, method.name, ', '.join([str(arg.name) for arg in method.args]))
def emit_memcpy(self, ptr, size):
print ' trace::fakeMemcpy(%s, %s);' % (ptr, size)
def fake_call(self, function, args):
print ' {'
print ' unsigned _fake_call = trace::localWriter.beginEnter(&_%s_sig, true);' % (function.name,)
for arg, instance in zip(function.args, args):
assert not arg.output
print ' trace::localWriter.beginArg(%u);' % (arg.index,)
self.serializeValue(arg.type, instance)
print ' trace::localWriter.endArg();'
print ' trace::localWriter.endEnter();'
print ' trace::localWriter.beginLeave(_fake_call);'
print ' trace::localWriter.endLeave();'
print ' }'
|
{
"content_hash": "295a57712b2078861b0cb2d30b340302",
"timestamp": "",
"source": "github",
"line_count": 976,
"max_line_length": 185,
"avg_line_length": 39.377049180327866,
"alnum_prop": 0.5532889258950874,
"repo_name": "schulmar/apitrace",
"id": "242785c3824109a1035dd95fff6b8a1a063c6994",
"size": "39703",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wrappers/trace.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9563"
},
{
"name": "C++",
"bytes": "7969805"
},
{
"name": "CMake",
"bytes": "66632"
},
{
"name": "Emacs Lisp",
"bytes": "204"
},
{
"name": "Java",
"bytes": "16257"
},
{
"name": "Makefile",
"bytes": "1056"
},
{
"name": "Objective-C++",
"bytes": "14170"
},
{
"name": "Python",
"bytes": "2018017"
},
{
"name": "Shell",
"bytes": "1037"
}
],
"symlink_target": ""
}
|
""" S3 SQL Forms
@copyright: 2012-2021 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3SQLCustomForm",
"S3SQLDefaultForm",
"S3SQLDummyField",
"S3SQLInlineInstruction",
"S3SQLSectionBreak",
"S3SQLVirtualField",
"S3SQLSubFormLayout",
"S3SQLVerticalSubFormLayout",
"S3SQLInlineComponent",
"S3SQLInlineLink",
"S3WithIntro",
)
import json
from itertools import chain
from gluon import *
from gluon.storage import Storage
from gluon.sqlhtml import StringWidget
from gluon.tools import callback
from gluon.validators import Validator
from s3dal import Field, original_tablename
from .s3query import FS
from .s3utils import s3_mark_required, s3_store_last_record_id, s3_str, s3_validate
from .s3widgets import S3Selector, S3UploadWidget
from .s3validators import JSONERRORS
# Compact JSON encoding
SEPARATORS = (",", ":")
DEFAULT = lambda: None
# =============================================================================
class S3SQLForm(object):
""" SQL Form Base Class"""
# -------------------------------------------------------------------------
def __init__(self, *elements, **attributes):
"""
Constructor to define the form and its elements.
@param elements: the form elements
@param attributes: form attributes
"""
self.elements = []
append = self.elements.append
debug = current.deployment_settings.get_base_debug()
for element in elements:
if not element:
continue
if isinstance(element, S3SQLFormElement):
append(element)
elif isinstance(element, str):
append(S3SQLField(element))
elif isinstance(element, tuple):
l = len(element)
if l > 1:
label, selector = element[:2]
widget = element[2] if l > 2 else DEFAULT
else:
selector = element[0]
label = widget = DEFAULT
append(S3SQLField(selector, label=label, widget=widget))
else:
msg = "Invalid form element: %s" % str(element)
if debug:
raise SyntaxError(msg)
else:
current.log.error(msg)
opts = {}
attr = {}
for k in attributes:
value = attributes[k]
if k[:1] == "_":
attr[k] = value
else:
opts[k] = value
self.attr = attr
self.opts = opts
self.prefix = None
self.name = None
self.resource = None
self.tablename = None
self.table = None
self.record_id = None
self.subtables = None
self.subrows = None
self.components = None
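# Usage sketch (illustrative, resource and field names are hypothetical):
# form elements can be passed as S3SQLFormElement instances, as plain field
# selectors, or as (label, selector[, widget]) tuples, e.g.:
#
#     crud_form = S3SQLCustomForm("name",
#                                 ("Short Description", "comments"),
#                                 S3SQLInlineComponent("contact",
#                                                      fields = ["value"],
#                                                      ),
#                                 )
#     s3db.configure("org_organisation", crud_form = crud_form)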
# -------------------------------------------------------------------------
# Rendering/Processing
# -------------------------------------------------------------------------
def __call__(self,
request = None,
resource = None,
record_id = None,
readonly = False,
message = "Record created/updated",
format = None,
**options):
"""
Render/process the form. To be implemented in subclass.
@param request: the S3Request
@param resource: the target S3Resource
@param record_id: the record ID
@param readonly: render the form read-only
@param message: message upon successful form submission
@param format: data format extension (for audit)
@param options: keyword options for the form
@return: a FORM instance
"""
return None
# -------------------------------------------------------------------------
# Utility functions
# -------------------------------------------------------------------------
def __len__(self):
"""
Support len(crud_form)
"""
return len(self.elements)
# -------------------------------------------------------------------------
def _config(self, key, default=None):
"""
Get a configuration setting for the current table
@param key: the setting key
@param default: fallback value if the setting is not available
"""
tablename = self.tablename
if tablename:
return current.s3db.get_config(tablename, key, default)
else:
return default
# -------------------------------------------------------------------------
@staticmethod
def _submit_buttons(readonly=False):
"""
Render submit buttons
@param readonly: render the form read-only
@return: list of submit buttons
"""
T = current.T
s3 = current.response.s3
settings = s3.crud
if settings.custom_submit:
submit = [(None,
settings.submit_button,
settings.submit_style)]
submit.extend(settings.custom_submit)
buttons = []
for name, label, _class in submit:
if isinstance(label, str):
label = T(label)
button = INPUT(_type = "submit",
_class = "btn crud-submit-button",
_name = name,
_value = label,
)
if _class:
button.add_class(_class)
buttons.append(button)
else:
buttons = ["submit"]
# Cancel button
if not readonly and s3.cancel:
if not settings.custom_submit:
if settings.submit_button:
submit_label = T(settings.submit_button)
else:
submit_label = T("Save")
submit_button = INPUT(_type = "submit",
_value = submit_label,
)
if settings.submit_style:
submit_button.add_class(settings.submit_style)
buttons = [submit_button]
cancel = s3.cancel
if isinstance(cancel, DIV):
cancel_button = cancel
else:
cancel_button = A(T("Cancel"),
_class = "cancel-form-btn action-lnk",
)
if isinstance(cancel, dict):
# Script-controlled cancel button (embedded form)
if "script" in cancel:
# Custom script
script = cancel["script"]
else:
# Default script: hide form, show add-button
script = \
'''$('.cancel-form-btn').click(function(){$('#%(hide)s').slideUp('medium',function(){$('#%(show)s').show()})})'''
s3.jquery_ready.append(script % cancel)
elif s3.cancel is True:
cancel_button.add_class("s3-cancel")
else:
cancel_button.update(_href = s3.cancel)
buttons.append(cancel_button)
return buttons
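# Configuration sketch (illustrative): custom submit buttons are expected as
# (name, label, css_class) tuples matching the unpacking above, e.g.:
#
#     s3.crud.custom_submit = [("save_add", "Save and add another", "small button"),
#                              ]
#
# The default submit button is always rendered first; each extra tuple adds
# another submit input with the given name and CSS class.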
# -------------------------------------------------------------------------
@staticmethod
def _insert_subheadings(form, tablename, formstyle, subheadings):
"""
Insert subheadings into forms
@param form: the form
@param tablename: the tablename
@param formstyle: the formstyle
@param subheadings:
{"fieldname": "Heading"} or {"fieldname": ["Heading1", "Heading2"]}
"""
if not subheadings:
return
if tablename in subheadings:
subheadings = subheadings.get(tablename)
if formstyle.__name__ in ("formstyle_table",
"formstyle_table_inline",
):
def create_subheading(represent, tablename, f, level=""):
return TR(TD(represent, _colspan=3,
_class="subheading",
),
_class = "subheading",
_id = "%s_%s__subheading%s" % (tablename, f, level),
)
else:
def create_subheading(represent, tablename, f, level=""):
return DIV(represent,
_class = "subheading",
_id = "%s_%s__subheading%s" % (tablename, f, level),
)
form_rows = iter(form[0])
tr = next(form_rows)
i = 0
while tr:
# @ToDo: We need a better way of working than this!
f = tr.attributes.get("_id", None)
if not f:
try:
# DIV-based form-style
f = tr[0][0].attributes.get("_id", None)
if not f:
# DRRPP formstyle
f = tr[0][0][1][0].attributes.get("_id", None)
if not f:
# Date fields are inside an extra TAG()
f = tr[0][0][1][0][0].attributes.get("_id", None)
except:
# Something else
f = None
if f:
if f.endswith("__row"):
f = f[:-5]
if f.startswith(tablename):
f = f[len(tablename) + 1:] # strip the "<tablename>_" prefix
if f.startswith("sub_"):
# Component
f = f[4:]
elif f.startswith("sub-default"):
# S3SQLInlineComponent[CheckBox]
f = f[11:]
elif f.startswith("sub_"):
# S3GroupedOptionsWidget
f = f[4:]
headings = subheadings.get(f)
if not headings:
try:
tr = next(form_rows)
except StopIteration:
break
else:
i += 1
continue
if not isinstance(headings, list):
headings = [headings]
inserted = 0
for heading in headings:
subheading = create_subheading(heading, tablename, f, inserted if inserted else "")
form[0].insert(i, subheading)
i += 1
inserted += 1
if inserted:
tr.attributes.update(_class="%s after_subheading" % tr.attributes["_class"])
for _i in range(0, inserted):
# Iterate over the rows we just created
tr = next(form_rows)
try:
tr = next(form_rows)
except StopIteration:
break
else:
i += 1
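# Usage sketch (illustrative table/field/heading names): subheadings are
# passed as a dict mapping the field name a heading should precede to the
# heading text (or a list of heading texts), optionally nested per tablename:
#
#     subheadings = {"project_task": {"status": "Processing",
#                                     "comments": ["Other", "Internal Notes"],
#                                     },
#                    }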
# -------------------------------------------------------------------------
def _populate(self,
from_table = None,
from_record = None,
map_fields = None,
data = None,
formfields = None,
format = None,
):
"""
Pre-populate the form with values from a previous record or
controller-submitted data
@param from_table: the table to copy the data from
@param from_record: the record to copy the data from
@param map_fields: field selection/mapping
@param data: the data to prepopulate the form with
@param format: the request format extension
"""
table = self.table
record = None
# Pre-populate from a previous record?
if from_table is not None:
# Field mapping
if map_fields:
if isinstance(map_fields, dict):
# Map fields with other names
fields = [from_table[map_fields[f]]
for f in map_fields
if f in table.fields and
map_fields[f] in from_table.fields and
table[f].writable]
elif isinstance(map_fields, (list, tuple)):
# Only use a subset of the fields
fields = [from_table[f]
for f in map_fields
if f in table.fields and
f in from_table.fields and
table[f].writable]
else:
raise TypeError
else:
# Use all writable fields
fields = [from_table[f]
for f in table.fields
if f in from_table.fields and
table[f].writable]
# Audit read => this is a read method, after all
prefix, name = from_table._tablename.split("_", 1)
current.audit("read", prefix, name,
record=from_record, representation=format)
# Get original record
query = (from_table.id == from_record)
row = current.db(query).select(limitby=(0, 1), *fields).first()
if row:
if isinstance(map_fields, dict):
record = {f: row[map_fields[f]] for f in map_fields}
else:
record = row.as_dict()
# Pre-populate from call?
elif isinstance(data, dict):
record = {f: data[f] for f in data
if f in table.fields and table[f].writable}
# Add missing fields to pre-populated record
if record:
missing_fields = {}
if formfields:
for f in formfields:
fname = f.name
if fname not in record and f.writable:
missing_fields[fname] = f.default
else:
for f in table.fields:
if f not in record and table[f].writable:
missing_fields[f] = table[f].default
record.update(missing_fields)
record[table._id.name] = None
return record
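# Usage sketch (illustrative): map_fields accepts either a list of field names
# to copy unchanged, or a dict mapping target field names (in this form's
# table) to source field names (in from_table), e.g.:
#
#     options = {"from_table": s3db.project_task,
#                "from_record": 42,
#                "map_fields": {"name": "name",
#                               "description": "comments",
#                               },
#                }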
# =============================================================================
class S3SQLDefaultForm(S3SQLForm):
""" Standard SQL form """
# -------------------------------------------------------------------------
# Rendering/Processing
# -------------------------------------------------------------------------
def __call__(self,
request = None,
resource = None,
record_id = None,
readonly = False,
message = "Record created/updated",
format = None,
**options):
"""
Render/process the form.
@param request: the S3Request
@param resource: the target S3Resource
@param record_id: the record ID
@param readonly: render the form read-only
@param message: message upon successful form submission
@param format: data format extension (for audit)
@param options: keyword options for the form
@todo: describe keyword arguments
@return: a FORM instance
"""
if resource is None:
self.resource = request.resource
self.prefix, self.name, self.table, self.tablename = \
request.target()
else:
self.resource = resource
self.prefix = resource.prefix
self.name = resource.name
self.tablename = resource.tablename
self.table = resource.table
response = current.response
s3 = response.s3
settings = s3.crud
prefix = self.prefix
name = self.name
tablename = self.tablename
table = self.table
record = None
labels = None
self.record_id = record_id
if not readonly:
get_option = options.get
# Populate create-form from another record?
if record_id is None:
data = get_option("data")
from_table = get_option("from_table")
from_record = get_option("from_record")
map_fields = get_option("map_fields")
record = self._populate(from_table = from_table,
from_record = from_record,
map_fields = map_fields,
data = data,
format = format,
)
# De-duplicate link table entries
self.record_id = record_id = self.deduplicate_link(request, record_id)
# Add asterisk to labels of required fields
mark_required = self._config("mark_required", default=[])
labels, required = s3_mark_required(table, mark_required)
# Show required-hint if there are any required fields.
s3.has_required = required
# Determine form style
if format == "plain":
# Default formstyle works best when we have no formatting
formstyle = "table3cols"
elif readonly:
formstyle = settings.formstyle_read
else:
formstyle = settings.formstyle
# Submit buttons
buttons = self._submit_buttons(readonly)
# Generate the form
if record is None:
record = record_id
response.form_label_separator = ""
form = SQLFORM(table,
record = record,
record_id = record_id,
readonly = readonly,
comments = not readonly,
deletable = False,
showid = False,
upload = s3.download_url,
labels = labels,
formstyle = formstyle,
separator = "",
submit_button = settings.submit_button,
buttons = buttons)
# Style the Submit button, if requested
if settings.submit_style and not settings.custom_submit:
try:
form[0][-1][0][0]["_class"] = settings.submit_style
except (KeyError, IndexError, TypeError):
# Submit button has been removed or a different formstyle,
# such as Bootstrap (which is already styled anyway)
pass
# Subheadings
subheadings = options.get("subheadings", None)
if subheadings:
self._insert_subheadings(form, tablename, formstyle, subheadings)
# Process the form
logged = False
if not readonly:
link = get_option("link")
hierarchy = get_option("hierarchy")
onvalidation = get_option("onvalidation")
onaccept = get_option("onaccept")
success, error = self.process(form,
request.post_vars,
onvalidation = onvalidation,
onaccept = onaccept,
hierarchy = hierarchy,
link = link,
http = request.http,
format = format,
)
if success:
response.confirmation = message
logged = True
elif error:
response.error = error
# Audit read
if not logged and not form.errors:
current.audit("read", prefix, name,
record=record_id, representation=format)
return form
# -------------------------------------------------------------------------
def deduplicate_link(self, request, record_id):
"""
Change to update if this request attempts to create a
duplicate entry in a link table
@param request: the request
@param record_id: the record ID
"""
linked = self.resource.linked
table = self.table
session = current.session
if request.env.request_method == "POST" and linked is not None:
pkey = table._id.name
post_vars = request.post_vars
if not post_vars[pkey]:
lkey = linked.lkey
rkey = linked.rkey
def parse_key(value):
key = s3_str(value)
if key.startswith("{"):
# JSON-based selector (e.g. S3LocationSelector)
return json.loads(key).get("id")
else:
# Normal selector (e.g. OptionsWidget)
return value
try:
lkey_ = parse_key(post_vars[lkey])
rkey_ = parse_key(post_vars[rkey])
except Exception:
return record_id
query = (table[lkey] == lkey_) & (table[rkey] == rkey_)
row = current.db(query).select(table._id, limitby=(0, 1)).first()
if row is not None:
tablename = self.tablename
record_id = row[pkey]
formkey = session.get("_formkey[%s/None]" % tablename)
formname = "%s/%s" % (tablename, record_id)
session["_formkey[%s]" % formname] = formkey
post_vars["_formname"] = formname
post_vars[pkey] = record_id
return record_id
# -------------------------------------------------------------------------
def process(self, form, vars,
onvalidation = None,
onaccept = None,
hierarchy = None,
link = None,
http = "POST",
format = None,
):
"""
Process the form
@param form: FORM instance
@param vars: request POST variables
@param onvalidation: callback(function) upon successful form validation
@param onaccept: callback(function) upon successful form acceptance
@param hierarchy: the data for the hierarchy link to create
@param link: component link
@param http: HTTP method
@param format: request extension
"""
table = self.table
tablename = self.tablename
# Get the proper onvalidation routine
if isinstance(onvalidation, dict):
onvalidation = onvalidation.get(tablename, [])
# Append link.postprocess to onvalidation
if link and link.postprocess:
postprocess = link.postprocess
if isinstance(onvalidation, list):
onvalidation.insert(0, postprocess)
elif onvalidation is not None:
onvalidation = [postprocess, onvalidation]
else:
onvalidation = [postprocess]
success = True
error = None
record_id = self.record_id
formname = "%s/%s" % (tablename, record_id)
if form.accepts(vars,
current.session,
formname = formname,
onvalidation = onvalidation,
keepvalues = False,
hideerror = False
):
# Undelete?
if vars.get("_undelete"):
undelete = form.vars.get("deleted") is False
else:
undelete = False
# Audit
prefix = self.prefix
name = self.name
if record_id is None or undelete:
current.audit("create", prefix, name, form=form,
representation=format)
else:
current.audit("update", prefix, name, form=form,
record=record_id, representation=format)
form_vars = form.vars
# Update super entity links
s3db = current.s3db
s3db.update_super(table, form_vars)
# Update component link
if link and link.postprocess is None:
resource = link.resource
master = link.master
resource.update_link(master, form_vars)
if form_vars.id:
if record_id is None or undelete:
# Create hierarchy link
if hierarchy:
from .s3hierarchy import S3Hierarchy
h = S3Hierarchy(tablename)
if h.config:
h.postprocess_create_node(hierarchy, form_vars)
# Set record owner
auth = current.auth
auth.s3_set_record_owner(table, form_vars.id)
auth.s3_make_session_owner(table, form_vars.id)
else:
# Update realm
update_realm = s3db.get_config(table, "update_realm")
if update_realm:
current.auth.set_realm_entity(table, form_vars,
force_update=True)
# Store session vars
self.resource.lastid = str(form_vars.id)
s3_store_last_record_id(tablename, form_vars.id)
# Execute onaccept
try:
callback(onaccept, form, tablename=tablename)
except:
error = "onaccept failed: %s" % str(onaccept)
current.log.error(error)
# Re-raise, otherwise the error would get swallowed
raise
else:
success = False
if form.errors:
# Revert any records created within widgets/validators
current.db.rollback()
# IS_LIST_OF validation errors need special handling
errors = []
for fieldname in form.errors:
if fieldname in table:
if isinstance(table[fieldname].requires, IS_LIST_OF):
errors.append("%s: %s" % (fieldname,
form.errors[fieldname]))
else:
errors.append(str(form.errors[fieldname]))
if errors:
error = "\n".join(errors)
elif http == "POST":
# Invalid form
error = current.T("Invalid form (re-opened in another window?)")
return success, error
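# Configuration sketch (illustrative callables): onvalidation may be given as
# a single callable, a list of callables, or a dict keyed by tablename (the
# dict form is resolved by process() above); onaccept takes a callable or a
# list of callables, e.g.:
#
#     options = {"onvalidation": {"project_task": validate_task},
#                "onaccept": [audit_hook, notify_hook],
#                }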
# =============================================================================
class S3SQLCustomForm(S3SQLForm):
""" Custom SQL Form """
# -------------------------------------------------------------------------
def insert(self, index, element):
"""
S.insert(index, object) -- insert object before index
"""
if not element:
return
if isinstance(element, S3SQLFormElement):
self.elements.insert(index, element)
elif isinstance(element, str):
self.elements.insert(index, S3SQLField(element))
elif isinstance(element, tuple):
l = len(element)
if l > 1:
label, selector = element[:2]
widget = element[2] if l > 2 else DEFAULT
else:
selector = element[0]
label = widget = DEFAULT
self.elements.insert(index, S3SQLField(selector, label=label, widget=widget))
else:
msg = "Invalid form element: %s" % str(element)
if current.deployment_settings.get_base_debug():
raise SyntaxError(msg)
else:
current.log.error(msg)
# -------------------------------------------------------------------------
def append(self, element):
"""
S.append(object) -- append object to the end of the sequence
"""
self.insert(len(self), element)
# -------------------------------------------------------------------------
# Rendering/Processing
# -------------------------------------------------------------------------
def __call__(self,
request = None,
resource = None,
record_id = None,
readonly = False,
message = "Record created/updated",
format = None,
**options):
"""
Render/process the form.
@param request: the S3Request
@param resource: the target S3Resource
@param record_id: the record ID
@param readonly: render the form read-only
@param message: message upon successful form submission
@param format: data format extension (for audit)
@param options: keyword options for the form
@return: a FORM instance
"""
db = current.db
response = current.response
s3 = response.s3
# Determine the target resource
if resource is None:
resource = request.resource
self.prefix, self.name, self.table, self.tablename = \
request.target()
else:
self.prefix = resource.prefix
self.name = resource.name
self.tablename = resource.tablename
self.table = resource.table
self.resource = resource
# Resolve all form elements against the resource
subtables = set()
subtable_fields = {}
fields = []
components = []
for element in self.elements:
alias, name, field = element.resolve(resource)
if isinstance(alias, str):
subtables.add(alias)
if field is not None:
fields_ = subtable_fields.get(alias)
if fields_ is None:
fields_ = []
fields_.append((name, field))
subtable_fields[alias] = fields_
elif isinstance(alias, S3SQLFormElement):
components.append(alias)
if field is not None:
fields.append((alias, name, field))
self.subtables = subtables
self.components = components
rcomponents = resource.components
# Customise subtables
if subtables:
if not request:
# Create dummy S3Request
from .s3rest import S3Request
r = S3Request(resource.prefix,
resource.name,
# Current request args/vars could be in a different
# resource context, so must override them here:
args = [],
get_vars = {},
)
else:
r = request
customise_resource = current.deployment_settings.customise_resource
for alias in subtables:
# Get tablename
component = rcomponents.get(alias)
if not component:
continue
tablename = component.tablename
# Run customise_resource
customise = customise_resource(tablename)
if customise:
customise(r, tablename)
# Apply customised attributes to renamed fields
# => except default, label, requires and widget, which can be overridden
# in S3SQLField.resolve instead
renamed_fields = subtable_fields.get(alias)
if renamed_fields:
table = component.table
for name, renamed_field in renamed_fields:
original_field = table[name]
for attr in ("comment",
"default",
"readable",
"represent",
"requires",
"update",
"writable",
):
setattr(renamed_field,
attr,
getattr(original_field, attr),
)
# Mark required fields with asterisk
if not readonly:
mark_required = self._config("mark_required", default=[])
labels, required = s3_mark_required(self.table, mark_required)
# Show the required-hint if there are any required fields.
s3.has_required = required
else:
labels = None
# Choose formstyle
crud_settings = s3.crud
if format == "plain":
# Simple formstyle works best when we have no formatting
formstyle = "table3cols"
elif readonly:
formstyle = crud_settings.formstyle_read
else:
formstyle = crud_settings.formstyle
# Retrieve the record
record = None
if record_id is not None:
query = (self.table._id == record_id)
# @ToDo: limit fields (at least not meta)
record = db(query).select(limitby=(0, 1)).first()
self.record_id = record_id
self.subrows = Storage()
# Populate the form
data = None
noupdate = []
forbidden = []
has_permission = current.auth.s3_has_permission
if record is not None:
# Retrieve the subrows
subrows = self.subrows
for alias in subtables:
# Get the component
component = rcomponents.get(alias)
if not component or component.multiple:
continue
# Get the subtable row from the DB
subfields = subtable_fields.get(alias)
if subfields:
subfields = [f[0] for f in subfields]
row = self._subrow(query, component, fields=subfields)
# Check permission for this subtable row
ctname = component.tablename
if not row:
permitted = has_permission("create", ctname)
if not permitted:
forbidden.append(alias)
continue
else:
cid = row[component.table._id]
permitted = has_permission("read", ctname, cid)
if not permitted:
forbidden.append(alias)
continue
permitted = has_permission("update", ctname, cid)
if not permitted:
noupdate.append(alias)
# Add the row to the subrows
subrows[alias] = row
# Build the data Storage for the form
pkey = self.table._id
data = Storage({pkey.name:record[pkey]})
for alias, name, field in fields:
if alias is None:
# Field in the master table
if name in record:
value = record[name]
# Field Method?
if callable(value):
value = value()
data[field.name] = value
elif alias in subtables:
# Field in a subtable
if alias in subrows and \
subrows[alias] is not None and \
name in subrows[alias]:
data[field.name] = subrows[alias][name]
elif hasattr(alias, "extract"):
# Form element with custom extraction method
data[field.name] = alias.extract(resource, record_id)
else:
# Record does not exist
self.record_id = record_id = None
# Check create-permission for subtables
for alias in subtables:
component = rcomponents.get(alias)
if not component:
continue
permitted = has_permission("create", component.tablename)
if not permitted:
forbidden.append(alias)
# Apply permissions for subtables
fields = [f for f in fields if f[0] not in forbidden]
for a, n, f in fields:
if a:
if a in noupdate:
f.writable = False
if labels is not None and f.name not in labels:
if f.required:
flabels = s3_mark_required([f], mark_required=[f])[0]
labels[f.name] = flabels[f.name]
elif f.label:
labels[f.name] = "%s:" % f.label
else:
labels[f.name] = ""
if readonly:
# Strip all comments
for a, n, f in fields:
f.comment = None
else:
# Mark required subtable-fields (retaining override-labels)
for alias in subtables:
component = rcomponents.get(alias)
if not component:
continue
mark_required = component.get_config("mark_required", [])
ctable = component.table
sfields = dict((n, (f.name, f.label))
for a, n, f in fields
if a == alias and n in ctable)
slabels = s3_mark_required([ctable[n] for n in sfields],
mark_required=mark_required,
map_names=sfields)[0]
if labels:
labels.update(slabels)
else:
labels = slabels
self.subtables = [s for s in self.subtables if s not in forbidden]
# Aggregate the form fields
formfields = [f[-1] for f in fields]
# Prepopulate from another record?
get_option = options.get
if not record_id and request.http == "GET":
data = self._populate(from_table = get_option("from_table"),
from_record = get_option("from_record"),
map_fields = get_option("map_fields"),
data = get_option("data"),
format = format,
formfields = formfields
)
# Submit buttons
buttons = self._submit_buttons(readonly)
# Render the form
tablename = self.tablename
response.form_label_separator = ""
form = SQLFORM.factory(record = data,
showid = False,
labels = labels,
formstyle = formstyle,
table_name = tablename,
upload = s3.download_url,
readonly = readonly,
separator = "",
submit_button = crud_settings.submit_button,
buttons = buttons,
*formfields)
# Style the Submit button, if requested
if crud_settings.submit_style and not crud_settings.custom_submit:
try:
form[0][-1][0][0]["_class"] = crud_settings.submit_style
except (KeyError, IndexError, TypeError):
# Submit button has been removed or a different formstyle,
# such as Bootstrap (which is already styled anyway)
pass
# Subheadings
subheadings = get_option("subheadings", None)
if subheadings:
self._insert_subheadings(form, tablename, formstyle, subheadings)
# Process the form
formname = "%s/%s" % (tablename, record_id)
post_vars = request.post_vars
if form.accepts(post_vars,
current.session,
onvalidation = self.validate,
formname = formname,
keepvalues = False,
hideerror = False,
):
# Undelete?
if post_vars.get("_undelete"):
undelete = post_vars.get("deleted") is False
else:
undelete = False
self.accept(form,
format = format,
link = get_option("link"),
hierarchy = get_option("hierarchy"),
undelete = undelete,
)
# Post-process the form submission after all records have
# been accepted and linked together (self.accept() has
# already updated the form data with any new keys here):
postprocess = self.opts.get("postprocess", None)
if postprocess:
try:
callback(postprocess, form, tablename=tablename)
except:
error = "postprocess failed: %s" % postprocess
current.log.error(error)
raise
response.confirmation = message
if form.errors:
# Revert any records created within widgets/validators
db.rollback()
response.error = current.T("There are errors in the form, please check your input")
return form
# -------------------------------------------------------------------------
def validate(self, form):
"""
Run the onvalidation callbacks for the master table
and all subtables in the form, and store any errors
in the form.
@param form: the form
"""
s3db = current.s3db
config = self._config
# Validate against the main table
if self.record_id:
onvalidation = config("update_onvalidation",
config("onvalidation", None))
else:
onvalidation = config("create_onvalidation",
config("onvalidation", None))
if onvalidation is not None:
try:
callback(onvalidation, form, tablename=self.tablename)
except:
error = "onvalidation failed: %s" % str(onvalidation)
current.log.error(error)
raise
# Validate against all subtables
get_config = s3db.get_config
for alias in self.subtables:
# Extract the subtable data
subdata = self._extract(form, alias)
if not subdata:
continue
# Get the onvalidation callback for this subtable
subtable = self.resource.components[alias].table
subform = Storage(vars = subdata,
errors = Storage(),
)
rows = self.subrows
if alias in rows and rows[alias] is not None:
# Add the record ID for update-onvalidation
pkey = subtable._id
subform.vars[pkey.name] = rows[alias][pkey]
subonvalidation = get_config(subtable._tablename,
"update_onvalidation",
get_config(subtable._tablename,
"onvalidation", None))
else:
subonvalidation = get_config(subtable._tablename,
"create_onvalidation",
get_config(subtable._tablename,
"onvalidation", None))
# Validate against the subtable, store errors in form
if subonvalidation is not None:
try:
callback(subonvalidation, subform,
tablename = subtable._tablename)
except:
error = "onvalidation failed: %s" % str(subonvalidation)
current.log.error(error)
raise
for fn in subform.errors:
dummy = "sub_%s_%s" % (alias, fn)
form.errors[dummy] = subform.errors[fn]
# Validate components (e.g. Inline-Forms)
for component in self.components:
if hasattr(component, "validate"):
component.validate(form)
return
# -------------------------------------------------------------------------
def accept(self,
form,
format = None,
link = None,
hierarchy = None,
undelete = False,
):
"""
Create/update all records from the form.
@param form: the form
@param format: data format extension (for audit)
@param link: resource.link for linktable components
@param hierarchy: the data for the hierarchy link to create
@param undelete: reinstate a previously deleted record
"""
db = current.db
resource = self.resource
table = self.table
accept_row = self._accept
input_data = self._extract
# Create/update the main record
main_data = input_data(form)
master_id, master_form_vars = accept_row(self.record_id,
main_data,
format = format,
link = link,
hierarchy = hierarchy,
undelete = undelete,
)
if not master_id:
return
else:
master_query = (table._id == master_id)
main_data[table._id.name] = master_id
# Make sure lastid is set even if master has no data
# (otherwise *_next redirection will fail)
resource.lastid = str(master_id)
# Create or update the subtables
get_subrow = self._subrow
for alias in self.subtables:
# Get the data for this subtable from the form
subdata = input_data(form, alias=alias)
if not subdata:
continue
component = resource.components[alias]
if not component or component.multiple:
return
subtable = component.table
# Get the key (pkey) of the master record to link the
# subtable record to, and update the subdata with it
pkey = component.pkey
if pkey != table._id.name and pkey not in main_data:
row = db(table._id == master_id).select(table[pkey],
limitby = (0, 1),
).first()
if not row:
return
main_data[pkey] = row[table[pkey]]
if component.link:
link = Storage(resource = component.link,
master = main_data,
)
else:
link = None
subdata[component.fkey] = main_data[pkey]
# Do we already have a record for this component?
subrow = get_subrow(master_query, component, fields=[subtable._id.name])
if subrow:
# Yes => get the subrecord ID
subid = subrow[subtable._id]
else:
# No => apply component defaults
subid = None
subdata = component.get_defaults(main_data,
data = subdata,
)
# Accept the subrecord
accept_row(subid,
subdata,
alias = alias,
link = link,
format = format,
)
# Accept components (e.g. Inline-Forms)
for item in self.components:
if hasattr(item, "accept"):
item.accept(form,
master_id = master_id,
format = format,
)
# Update form with master form_vars
form_vars = form.vars
# ID
form_vars[table._id.name] = master_id
# Super entities (& anything added manually in table's onaccept)
for var in master_form_vars:
if var not in form_vars:
form_vars[var] = master_form_vars[var]
return
# -------------------------------------------------------------------------
@staticmethod
def _subrow(master_query, component, fields=None):
"""
Extract the current row from a single-component
@param master_query: query for the master record
@param component: the single-component (S3Resource)
@param fields: list of field names to extract
"""
# Get the join for this subtable
if not component or component.multiple:
return None
query = master_query & component.get_join()
table = component.table
if fields:
# Map field names to component table
try:
fields = [table[f] for f in fields]
except (KeyError, AttributeError):
fields = None
else:
fields.insert(0, table._id)
if not fields:
fields = [table.ALL]
# Retrieve the row
return current.db(query).select(*fields,
limitby = (0, 1)
).first()
# -------------------------------------------------------------------------
# Utility functions
# -------------------------------------------------------------------------
def _extract(self, form, alias=None):
"""
Extract data for a subtable from the form
@param form: the form
@param alias: the component alias of the subtable
"""
if alias is None:
return self.table._filter_fields(form.vars)
else:
subform = Storage()
alias_length = len(alias)
form_vars = form.vars
for k in form_vars:
if k[:4] == "sub_" and \
k[4:4 + alias_length + 1] == "%s_" % alias:
fn = k[4 + alias_length + 1:]
subform[fn] = form_vars[k]
return subform
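# Naming sketch (illustrative alias/field names): subtable fields are rendered
# into the form under a "sub_<alias>_<fieldname>" name (see S3SQLField.resolve
# below), so for a single-record component aliased "details" a posted var
#
#     form.vars["sub_details_status"] = "open"
#
# is extracted here as subform["status"] = "open".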
# -------------------------------------------------------------------------
def _accept(self,
record_id,
data,
alias = None,
format = None,
hierarchy = None,
link = None,
undelete = False,
):
"""
Create or update a record
@param record_id: the record ID
@param data: the data
@param alias: the component alias
@param format: the request format (for audit)
@param hierarchy: the data for the hierarchy link to create
@param link: resource.link for linktable components
@param undelete: reinstate a previously deleted record
"""
if alias is not None:
# Subtable
if not data or \
not record_id and all(value is None for value in data.values()):
# No data => skip
return None, Storage()
elif record_id and not data:
# Existing master record, no data => skip, but return
# record_id to allow update of inline-components:
return record_id, Storage()
s3db = current.s3db
if alias is None:
component = self.resource
else:
component = self.resource.components[alias]
# Get the DB table (without alias)
table = component.table
tablename = component.tablename
if component._alias != tablename:
unaliased = s3db.table(component.tablename)
# Must retain custom defaults of the aliased component:
for field in table:
field_ = unaliased[field.name]
field_.default = field.default
field_.update = field.update
table = unaliased
get_config = s3db.get_config
oldrecord = None
if record_id:
# Update existing record
accept_id = record_id
db = current.db
onaccept = get_config(tablename, "update_onaccept",
get_config(tablename, "onaccept", None))
table_fields = table.fields
query = (table._id == record_id)
if onaccept:
# Get oldrecord in full to save in form
oldrecord = db(query).select(limitby = (0, 1),
).first()
elif "deleted" in table_fields:
oldrecord = db(query).select(table.deleted,
limitby = (0, 1),
).first()
else:
oldrecord = None
if undelete:
# Restoring a previously deleted record
if "deleted" in table_fields:
data["deleted"] = False
if "created_by" in table_fields and current.auth.user:
data["created_by"] = current.auth.user.id
if "created_on" in table_fields:
data["created_on"] = current.request.utcnow
elif oldrecord and "deleted" in oldrecord and oldrecord.deleted:
# Do not (ever) update a deleted record that we don't
# want to restore, otherwise this may set foreign keys
# in a deleted record!
return accept_id
db(table._id == record_id).update(**data)
else:
# Insert new record
accept_id = table.insert(**data)
if not accept_id:
raise RuntimeError("Could not create record")
onaccept = get_config(tablename, "create_onaccept",
get_config(tablename, "onaccept", None))
data[table._id.name] = accept_id
prefix, name = tablename.split("_", 1)
form_vars = Storage(data)
form = Storage(vars = form_vars,
record = oldrecord,
)
# Audit
if record_id is None or undelete:
current.audit("create", prefix, name,
form = form,
representation = format)
else:
current.audit("update", prefix, name,
form = form,
record = accept_id,
representation = format)
# Update super entity links
s3db.update_super(table, form_vars)
# Update component link
if link and link.postprocess is None:
resource = link.resource
master = link.master
resource.update_link(master, form_vars)
if accept_id:
if record_id is None or undelete:
# Create hierarchy link
if hierarchy:
from .s3hierarchy import S3Hierarchy
h = S3Hierarchy(tablename)
if h.config:
h.postprocess_create_node(hierarchy, form_vars)
# Set record owner
auth = current.auth
auth.s3_set_record_owner(table, accept_id)
auth.s3_make_session_owner(table, accept_id)
else:
# Update realm
update_realm = get_config(table, "update_realm")
if update_realm:
current.auth.set_realm_entity(table, form_vars,
force_update = True,
)
# Store session vars
component.lastid = str(accept_id)
s3_store_last_record_id(tablename, accept_id)
# Execute onaccept
try:
callback(onaccept, form, tablename=tablename)
except:
error = "onaccept failed: %s" % str(onaccept)
current.log.error(error)
# Re-raise, otherwise the error would get swallowed
raise
if alias is None:
# Return master_form_vars
return accept_id, form.vars
else:
return accept_id
# =============================================================================
class S3SQLFormElement(object):
""" SQL Form Element Base Class """
# -------------------------------------------------------------------------
def __init__(self, selector, **options):
"""
Constructor to define the form element, to be extended
in subclass.
@param selector: the data object selector
@param options: options for the form element
"""
self.selector = selector
self.options = Storage(options)
# -------------------------------------------------------------------------
def resolve(self, resource):
"""
Method to resolve this form element against the calling resource.
To be implemented in subclass.
@param resource: the resource
@return: a tuple
(
form element,
original field name,
Field instance for the form renderer
)
The form element can be None for the main table, the component
alias for a subtable, or this form element instance for a
subform.
If None is returned as Field instance, this form element will
not be rendered at all. Besides setting readable/writable
in the Field instance, this can be another mechanism to
control access to form elements.
"""
return None, None, None
# -------------------------------------------------------------------------
# Utility methods
# -------------------------------------------------------------------------
@staticmethod
def _rename_field(field, name,
comments = True,
label = DEFAULT,
popup = None,
skip_post_validation = False,
widget = DEFAULT,
):
"""
Rename a field (actually: create a new Field instance with the
same attributes as the given Field, but a different field name).
@param field: the original Field instance
@param name: the new name
@param comments: render comments - if set to False, only
navigation items with an inline() renderer
method will be rendered (unless popup is None)
@param label: override option for the original field label
@param popup: only if comments=False, additional vars for comment
navigation items (e.g. S3PopupLink), None prevents
rendering of navigation items
@param skip_post_validation: skip field validation during POST,
useful for client-side processed
dummy fields.
@param widget: override option for the original field widget
"""
if label is DEFAULT:
label = field.label
if widget is DEFAULT:
# Some widgets may need disabling during POST
widget = field.widget
if not hasattr(field, "type"):
# Virtual Field
field = Storage(comment = None,
type = "string",
length = 255,
unique = False,
uploadfolder = None,
autodelete = False,
label = "",
writable = False,
readable = True,
default = None,
update = None,
compute = None,
represent = lambda v: v or "",
)
requires = None
required = False
notnull = False
elif skip_post_validation and \
current.request.env.request_method == "POST":
requires = SKIP_POST_VALIDATION(field.requires)
required = False
notnull = False
else:
requires = field.requires
required = field.required
notnull = field.notnull
if not comments:
if popup:
comment = field.comment
if hasattr(comment, "clone"):
comment = comment.clone()
if hasattr(comment, "renderer") and \
hasattr(comment, "inline") and \
isinstance(popup, dict):
comment.vars.update(popup)
comment.renderer = comment.inline
else:
comment = None
else:
comment = None
else:
comment = field.comment
f = Field(str(name),
type = field.type,
length = field.length,
required = required,
notnull = notnull,
unique = field.unique,
uploadfolder = field.uploadfolder,
autodelete = field.autodelete,
comment = comment,
label = label,
widget = widget,
default = field.default,
writable = field.writable,
readable = field.readable,
update = field.update,
compute = field.compute,
represent = field.represent,
requires = requires)
return f
# =============================================================================
class S3SQLField(S3SQLFormElement):
"""
Base class for regular form fields
A regular form field is a field in the main form, which can be
fields in the main record or in a subtable (single-record-component).
"""
# -------------------------------------------------------------------------
def resolve(self, resource):
"""
Method to resolve this form element against the calling resource.
@param resource: the resource
@return: a tuple
(
subtable alias (or None for main table),
original field name,
Field instance for the form renderer
)
"""
# Import S3ResourceField only here, to avoid circular dependency
from .s3query import S3ResourceField
rfield = S3ResourceField(resource, self.selector)
field = rfield.field
if field is None:
raise SyntaxError("Invalid selector: %s" % self.selector)
tname = rfield.tname
options_get = self.options.get
label = options_get("label", DEFAULT)
widget = options_get("widget", DEFAULT)
if resource._alias:
tablename = resource._alias
else:
tablename = resource.tablename
if tname == tablename:
# Field in the main table
if label is not DEFAULT:
field.label = label
if widget is not DEFAULT:
field.widget = widget
return None, field.name, field
else:
for alias, component in resource.components.loaded.items():
if component.multiple:
continue
if component._alias:
tablename = component._alias
else:
tablename = component.tablename
if tablename == tname:
name = "sub_%s_%s" % (alias, rfield.fname)
renamed_field = self._rename_field(field,
name,
label = label,
widget = widget,
)
return alias, field.name, renamed_field
raise SyntaxError("Invalid subtable: %s" % tname)
# =============================================================================
class S3SQLVirtualField(S3SQLFormElement):
"""
A form element to embed values of field methods (virtual fields),
always read-only
"""
# -------------------------------------------------------------------------
def resolve(self, resource):
"""
Method to resolve this form element against the calling resource.
@param resource: the resource
@return: a tuple
(
subtable alias (or None for main table),
original field name,
Field instance for the form renderer
)
"""
table = resource.table
selector = self.selector
if not hasattr(table, selector):
raise SyntaxError("Undefined virtual field: %s" % selector)
label = self.options.label
if not label:
label = " ".join(s.capitalize() for s in selector.split("_"))
field = Field(selector,
label = label,
widget = self,
)
return None, selector, field
# -------------------------------------------------------------------------
def __call__(self, field, value, **attributes):
"""
Widget renderer for field method values, renders a simple
read-only DIV with the value
"""
widget = DIV(value, **attributes)
widget.add_class("s3-virtual-field")
return widget
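    # Illustrative usage (sketch only): the selector must match a field
    # method defined on the table; names here are hypothetical:
    #
    #   S3SQLCustomForm("name",
    #                   S3SQLVirtualField("total_hours",
    #                                     label = T("Total Hours"),
    #                                     ),
    #                   )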
# =============================================================================
class S3SQLDummyField(S3SQLFormElement):
"""
A Dummy Field
A simple DIV which can then be acted upon with JavaScript
- used by dc_question Grids
"""
# -------------------------------------------------------------------------
def resolve(self, resource):
"""
Method to resolve this form element against the calling resource.
@param resource: the resource
@return: a tuple
(
subtable alias (or None for main table),
original field name,
Field instance for the form renderer
)
"""
selector = self.selector
field = Field(selector,
default = "",
label = "",
widget = self,
)
return None, selector, field
# -------------------------------------------------------------------------
def __call__(self, field, value, **attributes):
"""
Widget renderer for the input field. To be implemented in
subclass (if required) and to be set as widget=self for the
field returned by the resolve()-method of this form element.
@param field: the input field
@param value: the value to populate the widget
@param attributes: attributes for the widget
@return: the widget for this form element as HTML helper
"""
return DIV(_class = "s3-dummy-field",
)
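    # Illustrative usage (sketch only): the selector is just a name for the
    # placeholder DIV, to be targeted by template JavaScript:
    #
    #   S3SQLCustomForm("question",
    #                   S3SQLDummyField("grid"),
    #                   )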
# =============================================================================
class S3SQLSectionBreak(S3SQLFormElement):
"""
A Section Break
        A simple DIV which can then be acted upon with JavaScript and/or styled
- used by dc_template.layout
"""
# -------------------------------------------------------------------------
def __init__(self):
"""
Constructor to define the form element, to be extended
in subclass.
"""
super(S3SQLSectionBreak, self).__init__(None)
# -------------------------------------------------------------------------
def resolve(self, resource):
"""
Method to resolve this form element against the calling resource.
@param resource: the resource
@return: a tuple
(
subtable alias (or None for main table),
original field name,
Field instance for the form renderer
)
"""
selector = ""
field = Field(selector,
default = "",
label = "",
widget = self,
)
return None, selector, field
# -------------------------------------------------------------------------
def __call__(self, field, value, **attributes):
"""
Widget renderer for the input field. To be implemented in
subclass (if required) and to be set as widget=self for the
field returned by the resolve()-method of this form element.
@param field: the input field
@param value: the value to populate the widget
@param attributes: attributes for the widget
@return: the widget for this form element as HTML helper
"""
return DIV(_class = "s3-section-break",
)
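    # Illustrative usage (sketch only):
    #
    #   S3SQLCustomForm("name",
    #                   S3SQLSectionBreak(),
    #                   "comments",
    #                   )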
# =============================================================================
class S3SQLInlineInstruction(S3SQLFormElement):
"""
Inline Instructions
        A simple DIV which can then be acted upon with JavaScript and/or styled
- used by dc_template.layout
"""
# -------------------------------------------------------------------------
def __init__(self, do, say, **options):
"""
Constructor to define the form element, to be extended
in subclass.
@param do: What to Do
@param say: What to Say
"""
super(S3SQLInlineInstruction, self).__init__(None)
self.do = do
self.say = say
# -------------------------------------------------------------------------
def resolve(self, resource):
"""
Method to resolve this form element against the calling resource.
@param resource: the resource
@return: a tuple
(
subtable alias (or None for main table),
original field name,
Field instance for the form renderer
)
"""
selector = ""
field = Field(selector,
default = "",
label = "",
widget = self,
)
return None, selector, field
# -------------------------------------------------------------------------
def __call__(self, field, value, **attributes):
"""
Widget renderer for the input field. To be implemented in
subclass (if required) and to be set as widget=self for the
field returned by the resolve()-method of this form element.
@param field: the input field
@param value: the value to populate the widget
@param attributes: attributes for the widget
@return: the widget for this form element as HTML helper
"""
element = DIV(_class = "s3-inline-instructions",
)
element["data-do"] = self.do
element["data-say"] = self.say
return element
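    # Illustrative usage (sketch only - instruction texts are hypothetical):
    #
    #   S3SQLCustomForm("name",
    #                   S3SQLInlineInstruction(do = "Measure the height",
    #                                          say = "Please stand up straight",
    #                                          ),
    #                   )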
# =============================================================================
class S3SQLSubForm(S3SQLFormElement):
"""
Base class for subforms
A subform is a form element to be processed after the main
form. Subforms render a single (usually hidden) input field
and a client-side controlled widget to manipulate its contents.
"""
# -------------------------------------------------------------------------
def __init__(self, selector, **options):
"""
Constructor to define the form element, to be extended
in subclass.
@param selector: the data object selector
@param options: options for the form element
"""
super(S3SQLSubForm, self).__init__(selector, **options)
self.alias = None
# -------------------------------------------------------------------------
def extract(self, resource, record_id):
"""
Initialize this form element for a particular record. This
method will be called by the form renderer to populate the
form for an existing record. To be implemented in subclass.
@param resource: the resource the record belongs to
@param record_id: the record ID
@return: the value for the input field that corresponds
to the specified record.
"""
return None
# -------------------------------------------------------------------------
def parse(self, value, record_id=None):
"""
Validator method for the input field, used to extract the
data from the input field and prepare them for further
processing by the accept()-method. To be implemented in
subclass and set as requires=self.parse for the input field
in the resolve()-method of this form element.
@param value: the value returned from the input field
            @param record_id: unused (for API compatibility with validators)
@return: tuple of (value, error) where value is the
pre-processed field value and error an error
message in case of invalid data, or None.
"""
return (value, None)
# -------------------------------------------------------------------------
def __call__(self, field, value, **attributes):
"""
Widget renderer for the input field. To be implemented in
subclass (if required) and to be set as widget=self for the
field returned by the resolve()-method of this form element.
@param field: the input field
@param value: the value to populate the widget
@param attributes: attributes for the widget
@return: the widget for this form element as HTML helper
"""
raise NotImplementedError
# -------------------------------------------------------------------------
def represent(self, value):
"""
Read-only representation of this form element. This will be
used instead of the __call__() method when the form element
is to be rendered read-only.
@param value: the value as returned from extract()
@return: the read-only representation of this element as
string or HTML helper
"""
return ""
# -------------------------------------------------------------------------
def accept(self, form, master_id=None, format=None):
"""
Post-process this form element and perform the related
transactions. This method will be called after the main
form has been accepted, where the master record ID will
be provided.
@param form: the form
@param master_id: the master record ID
@param format: the data format extension
@return: True on success, False on error
"""
return True
# =============================================================================
class SKIP_POST_VALIDATION(Validator):
"""
Pseudo-validator that allows introspection of field options
during GET, but does nothing during POST. Used for Ajax-validated
inline-components to prevent them from throwing validation errors
when the outer form gets submitted.
"""
def __init__(self, other=None):
"""
Constructor, used like:
field.requires = SKIP_POST_VALIDATION(field.requires)
@param other: the actual field validator
"""
if other and isinstance(other, (list, tuple)):
other = other[0]
self.other = other
if other:
if hasattr(other, "multiple"):
self.multiple = other.multiple
if hasattr(other, "options"):
self.options = other.options
if hasattr(other, "formatter"):
self.formatter = other.formatter
def __call__(self, value, record_id=None):
"""
Validation
@param value: the value
@param record_id: the record ID (unused, for API compatibility)
"""
other = self.other
if current.request.env.request_method == "POST" or not other:
return value, None
if not isinstance(other, (list, tuple)):
other = [other]
for r in other:
value, error = r(value)
if error:
return value, error
return value, None
# =============================================================================
class S3SQLSubFormLayout(object):
""" Layout for S3SQLInlineComponent (Base Class) """
# Layout-specific CSS class for the inline component
layout_class = "subform-default"
def __init__(self):
""" Constructor """
self.inject_script()
self.columns = None
self.row_actions = True
# -------------------------------------------------------------------------
def set_columns(self, columns, row_actions=True):
"""
Set column widths for inline-widgets, can be used by subclasses
to render CSS classes for grid-width
@param columns: iterable of column widths
            @param row_actions: whether the subform contains an action column
"""
self.columns = columns
self.row_actions = row_actions
# -------------------------------------------------------------------------
def subform(self,
data,
item_rows,
action_rows,
empty = False,
readonly = False,
):
"""
Outer container for the subform
@param data: the data dict (as returned from extract())
@param item_rows: the item rows
@param action_rows: the (hidden) action rows
@param empty: no data in this component
@param readonly: render read-only
"""
if empty:
subform = current.T("No entries currently available")
else:
headers = self.headers(data, readonly=readonly)
subform = TABLE(headers,
TBODY(item_rows),
TFOOT(action_rows),
_class = " ".join(("embeddedComponent", self.layout_class)),
)
return subform
# -------------------------------------------------------------------------
def readonly(self, resource, data):
"""
Render this component read-only (table-style)
@param resource: the S3Resource
@param data: the data dict (as returned from extract())
"""
audit = current.audit
prefix, name = resource.prefix, resource.name
xml_decode = current.xml.xml_decode
items = data["data"]
fields = data["fields"]
trs = []
for item in items:
if "_id" in item:
record_id = item["_id"]
else:
continue
audit("read", prefix, name,
record=record_id, representation="html")
trow = TR(_class="read-row")
for f in fields:
text = xml_decode(item[f["name"]]["text"])
trow.append(XML(xml_decode(text)))
trs.append(trow)
return self.subform(data, trs, [], empty=False, readonly=True)
# -------------------------------------------------------------------------
@staticmethod
def render_list(resource, data):
"""
Render this component read-only (list-style)
@param resource: the S3Resource
@param data: the data dict (as returned from extract())
"""
audit = current.audit
prefix, name = resource.prefix, resource.name
xml_decode = current.xml.xml_decode
items = data["data"]
fields = data["fields"]
# Render as comma-separated list of values (no header)
elements = []
for item in items:
if "_id" in item:
record_id = item["_id"]
else:
continue
audit("read", prefix, name,
record=record_id, representation="html")
t = []
for f in fields:
t.append([XML(xml_decode(item[f["name"]]["text"])), " "])
elements.append([TAG[""](list(chain.from_iterable(t))[:-1]), ", "])
return DIV(list(chain.from_iterable(elements))[:-1],
_class = "embeddedComponent",
)
# -------------------------------------------------------------------------
def headers(self, data, readonly=False):
"""
Render the header row with field labels
@param data: the input field data as Python object
@param readonly: whether the form is read-only
"""
fields = data["fields"]
# Don't render a header row if there are no labels
render_header = False
header_row = TR(_class = "label-row static")
happend = header_row.append
for f in fields:
label = f["label"]
if label:
render_header = True
label = TD(LABEL(label))
happend(label)
if render_header:
if not readonly:
# Add columns for the Controls
happend(TD())
happend(TD())
return THEAD(header_row)
else:
return THEAD(_class = "hide")
# -------------------------------------------------------------------------
@staticmethod
def actions(subform,
formname,
index,
item = None,
readonly = True,
editable = True,
deletable = True,
):
"""
Render subform row actions into the row
@param subform: the subform row
@param formname: the form name
@param index: the row index
@param item: the row data
@param readonly: this is a read-row
@param editable: this row is editable
@param deletable: this row is deletable
"""
T = current.T
action_id = "%s-%s" % (formname, index)
# Action button helper
def action(title, name, throbber=False):
btn = DIV(_id = "%s-%s" % (name, action_id),
_class = "inline-%s" % name,
)
if throbber:
return DIV(btn,
DIV(_class = "inline-throbber hide",
_id = "throbber-%s" % action_id,
),
)
else:
return DIV(btn)
# CSS class for action-columns
_class = "subform-action"
# Render the action icons for this row
append = subform.append
if readonly:
if editable:
append(TD(action(T("Edit this entry"), "edt"),
_class = _class,
))
else:
append(TD(_class = _class))
if deletable:
append(TD(action(T("Remove this entry"), "rmv"),
_class = _class,
))
else:
append(TD(_class = _class))
else:
if index != "none" or item:
append(TD(action(T("Update this entry"), "rdy", throbber=True),
_class = _class,
))
append(TD(action(T("Cancel editing"), "cnc"),
_class = _class,
))
else:
append(TD(action(T("Discard this entry"), "dsc"),
_class = _class,
))
append(TD(action(T("Add this entry"), "add", throbber=True),
_class = _class,
))
# -------------------------------------------------------------------------
def rowstyle_read(self, form, fields, *args, **kwargs):
"""
Formstyle for subform read-rows, normally identical
to rowstyle, but can be different in certain layouts
"""
return self.rowstyle(form, fields, *args, **kwargs)
# -------------------------------------------------------------------------
def rowstyle(self, form, fields, *args, **kwargs):
"""
Formstyle for subform action-rows
"""
def render_col(col_id, label, widget, comment, hidden=False):
if col_id == "submit_record__row":
if hasattr(widget, "add_class"):
widget.add_class("inline-row-actions")
col = TD(widget)
elif comment:
col = TD(DIV(widget,
comment,
),
_id = col_id,
)
else:
col = TD(widget,
_id = col_id,
)
return col
if args:
col_id = form
label = fields
widget, comment = args
hidden = kwargs.get("hidden", False)
return render_col(col_id, label, widget, comment, hidden)
else:
parent = TR()
for col_id, label, widget, comment in fields:
parent.append(render_col(col_id, label, widget, comment))
return parent
# -------------------------------------------------------------------------
@staticmethod
def inject_script():
""" Inject custom JS to render new read-rows """
# Example:
#appname = current.request.application
#scripts = current.response.s3.scripts
#script = "/%s/static/themes/CRMT/js/inlinecomponent.layout.js" % appname
#if script not in scripts:
#scripts.append(script)
# No custom JS in the default layout
return
# =============================================================================
class S3SQLVerticalSubFormLayout(S3SQLSubFormLayout):
"""
Vertical layout for inline-components
        - renders a vertical layout for edit-rows
        - renders the standard horizontal layout for read-rows
        - hides the header row if there are no visible read-rows
"""
# Layout-specific CSS class for the inline component
layout_class = "subform-vertical"
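    # Illustrative usage (sketch only): this layout is picked up via the
    # deployment setting read by S3SQLInlineComponent._layout(); in a template
    # config this would typically look like (setting name assumed from the
    # get_ui_inline_component_layout getter):
    #
    #   settings.ui.inline_component_layout = S3SQLVerticalSubFormLayout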
# -------------------------------------------------------------------------
def headers(self, data, readonly=False):
"""
Header-row layout: same as default, but non-static (i.e. hiding
if there are no visible read-rows, because edit-rows have their
own labels)
"""
headers = super(S3SQLVerticalSubFormLayout, self).headers
header_row = headers(data, readonly = readonly)
element = header_row.element("tr")
if hasattr(element, "remove_class"):
element.remove_class("static")
return header_row
# -------------------------------------------------------------------------
def rowstyle_read(self, form, fields, *args, **kwargs):
"""
Formstyle for subform read-rows, same as standard
horizontal layout.
"""
rowstyle = super(S3SQLVerticalSubFormLayout, self).rowstyle
return rowstyle(form, fields, *args, **kwargs)
# -------------------------------------------------------------------------
def rowstyle(self, form, fields, *args, **kwargs):
"""
Formstyle for subform edit-rows, using a vertical
formstyle because multiple fields combined with
location-selector are too complex for horizontal
layout.
"""
# Use standard foundation formstyle
from s3theme import formstyle_foundation as formstyle
if args:
col_id = form
label = fields
widget, comment = args
hidden = kwargs.get("hidden", False)
return formstyle(col_id, label, widget, comment, hidden)
else:
parent = TD(_colspan = len(fields))
for col_id, label, widget, comment in fields:
parent.append(formstyle(col_id, label, widget, comment))
return TR(parent)
# =============================================================================
class S3SQLInlineComponent(S3SQLSubForm):
"""
Form element for an inline-component-form
This form element allows CRUD of multi-record-components within
the main record form. It renders a single hidden text field with a
JSON representation of the component records, and a widget which
facilitates client-side manipulation of this JSON.
This widget is a row of fields per component record.
The widget uses the s3.ui.inline_component.js script for client-side
manipulation of the JSON data. Changes made by the script will be
validated through Ajax-calls to the CRUD.validate() method.
During accept(), the component gets updated according to the JSON
returned.
@ToDo: Support filtering of field options
Usecase is inline project_organisation for IFRC
PartnerNS needs to be filtered differently from Partners/Donors,
so can't just set a global requires for the field in the controller
- needs to be inside the widget.
See private/templates/IFRC/config.py
"""
prefix = "sub"
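    # Illustrative usage (sketch only - component, field and filter names
    # are hypothetical):
    #
    #   S3SQLCustomForm("name",
    #                   S3SQLInlineComponent("contact",
    #                                        name = "email",
    #                                        label = T("Email"),
    #                                        fields = [("", "value")],
    #                                        filterby = {"field": "contact_method",
    #                                                    "options": "EMAIL",
    #                                                    },
    #                                        ),
    #                   )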
def __init__(self, selector, **options):
super(S3SQLInlineComponent, self).__init__(selector, **options)
self.resource = None
self.upload = {}
# -------------------------------------------------------------------------
def resolve(self, resource):
"""
Method to resolve this form element against the calling resource.
@param resource: the resource
@return: a tuple (self, None, Field instance)
"""
selector = self.selector
# Check selector
try:
component = resource.components[selector]
except KeyError:
raise SyntaxError("Undefined component: %s" % selector)
# Check permission
permitted = current.auth.s3_has_permission("read",
component.tablename,
)
if not permitted:
return (None, None, None)
options = self.options
if "name" in options:
self.alias = options["name"]
label = self.alias
else:
self.alias = "default"
label = self.selector
if "label" in options:
label = options["label"]
else:
label = " ".join([s.capitalize() for s in label.split("_")])
fname = self._formname(separator = "_")
field = Field(fname, "text",
comment = options.get("comment", None),
default = self.extract(resource, None),
label = label,
represent = self.represent,
required = options.get("required", False),
requires = self.parse,
widget = self,
)
return (self, None, field)
# -------------------------------------------------------------------------
def extract(self, resource, record_id):
"""
Initialize this form element for a particular record. Retrieves
the component data for this record from the database and
converts them into a JSON string to populate the input field with.
@param resource: the resource the record belongs to
@param record_id: the record ID
@return: the JSON for the input field.
"""
self.resource = resource
component_name = self.selector
try:
component = resource.components[component_name]
except KeyError:
raise AttributeError("Undefined component")
options = self.options
if component.link:
link = options.get("link", True)
if link:
# For link-table components, embed the link
# table rather than the component
component = component.link
table = component.table
tablename = component.tablename
pkey = table._id.name
fields_opt = options.get("fields", None)
labels = {}
if fields_opt:
fields = []
for f in fields_opt:
if isinstance(f, tuple):
label, f = f
labels[f] = label
if f in table.fields:
fields.append(f)
else:
# Really?
fields = [f.name for f in table if f.readable or f.writable]
if pkey not in fields:
fields.insert(0, pkey)
# Support read-only Virtual Fields
if "virtual_fields" in options:
virtual_fields = options["virtual_fields"]
else:
virtual_fields = []
if "orderby" in options:
orderby = options["orderby"]
else:
orderby = component.get_config("orderby")
if record_id:
if "filterby" in options:
# Filter
f = self._filterby_query()
if f is not None:
component.build_query(filter=f)
if "extra_fields" in options:
extra_fields = options["extra_fields"]
else:
extra_fields = []
all_fields = fields + virtual_fields + extra_fields
start = 0
limit = 1 if options.multiple is False else None
data = component.select(all_fields,
start = start,
limit = limit,
represent = True,
raw_data = True,
show_links = False,
orderby = orderby,
)
records = data["rows"]
rfields = data["rfields"]
            # Iterate over a copy, because removing entries from the list
            # while iterating over it would skip subsequent entries
            for f in list(rfields):
                if f.fname in extra_fields:
                    rfields.remove(f)
else:
s = f.selector
if s.startswith("~."):
s = s[2:]
label = labels.get(s, None)
if label is not None:
f.label = label
else:
records = []
rfields = []
for s in fields:
rfield = component.resolve_selector(s)
label = labels.get(s, None)
if label is not None:
rfield.label = label
rfields.append(rfield)
for f in virtual_fields:
rfield = component.resolve_selector(f[1])
rfield.label = f[0]
rfields.append(rfield)
headers = [{"name": rfield.fname,
"label": s3_str(rfield.label),
}
for rfield in rfields if rfield.fname != pkey]
items = []
has_permission = current.auth.s3_has_permission
for record in records:
row = record["_row"]
row_id = row[str(table._id)]
item = {"_id": row_id}
permitted = has_permission("update", tablename, row_id)
if not permitted:
item["_readonly"] = True
for rfield in rfields:
fname = rfield.fname
if fname == pkey:
continue
colname = rfield.colname
field = rfield.field
widget = field.widget
if isinstance(widget, S3Selector):
# Use the widget extraction/serialization method
value = widget.serialize(widget.extract(row[colname]))
elif hasattr(field, "formatter"):
value = field.formatter(row[colname])
else:
# Virtual Field
value = row[colname]
text = s3_str(record[colname])
# Text representation is only used in read-forms where
# representation markup cannot interfere with the inline
# form logic - so stripping the markup should not be
# necessary here:
#if "<" in text:
# text = s3_strip_markup(text)
item[fname] = {"value": value, "text": text}
items.append(item)
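        # Each item in items now has the form (values illustrative):
        #   {"_id": 4,
        #    "location_id": {"value": 4, "text": "Some Location"},
        #    }
        # ...with "_readonly": True added when the user is not permitted
        # to update the record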
validate = options.get("validate", None)
if not validate or \
not isinstance(validate, tuple) or \
not len(validate) == 2:
request = current.request
validate = (request.controller, request.function)
c, f = validate
data = {"controller": c,
"function": f,
"resource": resource.tablename,
"component": component_name,
"fields": headers,
"defaults": self._filterby_defaults(),
"data": items,
}
return json.dumps(data, separators=SEPARATORS)
# -------------------------------------------------------------------------
def parse(self, value, record_id=None):
"""
Validator method, converts the JSON returned from the input
field into a Python object.
@param value: the JSON from the input field.
            @param record_id: unused (for API compatibility with validators)
@return: tuple of (value, error), where value is the converted
JSON, and error the error message if the decoding
fails, otherwise None
"""
# @todo: catch uploads during validation errors
if isinstance(value, str):
try:
value = json.loads(value)
except JSONERRORS:
import sys
error = sys.exc_info()[1]
if hasattr(error, "message"):
error = error.message
else:
error = None
else:
value = None
error = None
return (value, error)
# -------------------------------------------------------------------------
def __call__(self, field, value, **attributes):
"""
Widget method for this form element. Renders a table with
read-rows for existing entries, a variable edit-row to update
existing entries, and an add-row to add new entries. This widget
uses s3.inline_component.js to facilitate manipulation of the
entries.
@param field: the Field for this form element
@param value: the current value for this field
@param attributes: keyword attributes for this widget
"""
T = current.T
settings = current.deployment_settings
options = self.options
if options.readonly is True:
# Render read-only
return self.represent(value)
if value is None:
value = field.default
if isinstance(value, str):
data = json.loads(value)
else:
data = value
value = json.dumps(value, separators=SEPARATORS)
if data is None:
raise SyntaxError("No resource structure information")
if options.multiple is False:
multiple = False
else:
multiple = True
required = options.get("required", False)
# Get the table
resource = self.resource
component_name = data["component"]
component = resource.components[component_name]
table = component.table
# @ToDo: Hide completely if the user is not permitted to read this
# component
formname = self._formname()
fields = data["fields"]
items = data["data"]
# Flag whether there are any rows (at least an add-row) in the widget
has_rows = False
# Add the item rows
item_rows = []
prefix = component.prefix
name = component.name
audit = current.audit
has_permission = current.auth.s3_has_permission
tablename = component.tablename
# Configure the layout
layout = self._layout()
columns = options.get("columns")
if columns:
layout.set_columns(columns, row_actions = multiple)
get_config = current.s3db.get_config
_editable = get_config(tablename, "editable")
if _editable is None:
_editable = True
_deletable = get_config(tablename, "deletable")
if _deletable is None:
_deletable = True
_class = "read-row inline-form"
if not multiple:
# Mark to client-side JS that we should open Edit Row
_class = "%s single" % _class
item = None
for i in range(len(items)):
has_rows = True
item = items[i]
# Get the item record ID
if "_delete" in item and item["_delete"]:
continue
elif "_id" in item:
record_id = item["_id"]
# Check permissions to edit this item
if _editable:
editable = has_permission("update", tablename, record_id)
else:
editable = False
if _deletable:
deletable = has_permission("delete", tablename, record_id)
else:
deletable = False
else:
record_id = None
editable = bool(_editable)
deletable = bool(_deletable)
# Render read-row accordingly
rowname = "%s-%s" % (formname, i)
read_row = self._render_item(table, item, fields,
editable = editable,
deletable = deletable,
readonly = True,
multiple = multiple,
index = i,
layout = layout,
_id = "read-row-%s" % rowname,
_class = _class,
)
if record_id:
audit("read", prefix, name,
record = record_id,
representation = "html",
)
item_rows.append(read_row)
# Add the action rows
action_rows = []
# Edit-row
_class = "edit-row inline-form hide"
if required and has_rows:
_class = "%s required" % _class
if not multiple:
_class = "%s single" % _class
edit_row = self._render_item(table, item, fields,
editable = _editable,
deletable = _deletable,
readonly = False,
multiple = multiple,
index = 0,
layout = layout,
_id = "edit-row-%s" % formname,
_class = _class,
)
action_rows.append(edit_row)
# Add-row
inline_open_add = ""
insertable = get_config(tablename, "insertable")
if insertable is None:
insertable = True
if insertable:
insertable = has_permission("create", tablename)
if insertable:
_class = "add-row inline-form"
explicit_add = options.explicit_add
if not multiple:
explicit_add = False
if has_rows:
# Add Rows not relevant
_class = "%s hide" % _class
else:
# Mark to client-side JS that we should always validate
_class = "%s single" % _class
if required and not has_rows:
explicit_add = False
_class = "%s required" % _class
# Explicit open-action for add-row (optional)
if explicit_add:
# Hide add-row for explicit open-action
_class = "%s hide" % _class
if explicit_add is True:
label = T("Add another")
else:
label = explicit_add
inline_open_add = A(label,
_class = "inline-open-add action-lnk",
)
has_rows = True
add_row = self._render_item(table, None, fields,
editable = True,
deletable = True,
readonly = False,
multiple = multiple,
layout = layout,
_id = "add-row-%s" % formname,
_class = _class,
)
action_rows.append(add_row)
# Empty edit row
empty_row = self._render_item(table, None, fields,
editable = _editable,
deletable = _deletable,
readonly = False,
multiple = multiple,
index = "default",
layout = layout,
_id = "empty-edit-row-%s" % formname,
_class = "empty-row inline-form hide",
)
action_rows.append(empty_row)
# Empty read row
empty_row = self._render_item(table, None, fields,
editable = _editable,
deletable = _deletable,
readonly = True,
multiple = multiple,
index = "none",
layout = layout,
_id = "empty-read-row-%s" % formname,
_class = "empty-row inline-form hide",
)
action_rows.append(empty_row)
# Real input: a hidden text field to store the JSON data
real_input = "%s_%s" % (resource.tablename, field.name)
default = {"_type": "hidden",
"_value": value,
"requires": lambda v: (v, None),
}
attr = StringWidget._attributes(field, default, **attributes)
attr["_class"] = "%s hide" % attr["_class"]
attr["_id"] = real_input
widget = layout.subform(data,
item_rows,
action_rows,
empty = not has_rows,
)
if self.upload:
hidden = DIV(_class="hidden", _style="display:none")
for k, v in self.upload.items():
hidden.append(INPUT(_type = "text",
_id = k,
_name = k,
_value = v,
_style = "display:none",
))
else:
hidden = ""
# Render output HTML
output = DIV(INPUT(**attr),
hidden,
widget,
inline_open_add,
_id = self._formname(separator="-"),
_field = real_input,
_class = "inline-component",
)
# Reset the layout
layout.set_columns(None)
# Script options
js_opts = {"implicitCancelEdit": settings.get_ui_inline_cancel_edit(),
"confirmCancelEdit": s3_str(T("Discard changes?")),
}
script = '''S3.inlineComponentsOpts=%s''' % json.dumps(js_opts)
js_global = current.response.s3.js_global
if script not in js_global:
js_global.append(script)
return output
# -------------------------------------------------------------------------
def represent(self, value):
"""
Read-only representation of this sub-form
@param value: the value returned from extract()
"""
if isinstance(value, str):
data = json.loads(value)
else:
data = value
if data["data"] == []:
# Don't render a subform for NONE
return current.messages["NONE"]
resource = self.resource
component = resource.components[data["component"]]
layout = self._layout()
columns = self.options.get("columns")
if columns:
layout.set_columns(columns, row_actions=False)
fields = data["fields"]
if len(fields) == 1 and self.options.get("render_list", False):
output = layout.render_list(component, data)
else:
output = layout.readonly(component, data)
# Reset the layout
layout.set_columns(None)
return DIV(output,
_id = self._formname(separator = "-"),
_class = "inline-component readonly",
)
# -------------------------------------------------------------------------
def accept(self, form, master_id=None, format=None):
"""
Post-processes this form element against the POST data of the
            request, and creates/updates/deletes any related records.
@param form: the form
@param master_id: the ID of the master record in the form
@param format: the data format extension (for audit)
"""
# Name of the real input field
fname = self._formname(separator="_")
options_get = self.options.get
multiple = options_get("multiple", True)
defaults = options_get("default", {})
if fname in form.vars:
# Retrieve the data
try:
data = json.loads(form.vars[fname])
except ValueError:
return False
component_name = data.get("component", None)
if not component_name:
return False
data = data.get("data", None)
if not data:
return False
# Get the component
resource = self.resource
component = resource.components.get(component_name)
if not component:
return False
# Link table handling
link = component.link
if link and options_get("link", True):
# Data are for the link table
actuate_link = False
component = link
else:
# Data are for the component
actuate_link = True
# Table, tablename, prefix and name of the component
prefix = component.prefix
name = component.name
tablename = component.tablename
db = current.db
table = db[tablename]
s3db = current.s3db
auth = current.auth
# Process each item
has_permission = auth.s3_has_permission
audit = current.audit
onaccept = s3db.onaccept
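            # Each item is a dict as produced client-side (and by extract()):
            # keys starting with "_" are metadata ("_id", "_index", "_changed",
            # "_delete"), all other keys map field names to dicts of the form
            # {"value": ..., "text": ...}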
for item in data:
if not "_changed" in item and not "_delete" in item:
# No changes made to this item - skip
continue
delete = item.get("_delete")
values = Storage()
valid = True
if not delete:
# Get the values
for f, d in item.items():
if f[0] != "_" and d and isinstance(d, dict):
field = table[f]
widget = field.widget
if not hasattr(field, "type"):
# Virtual Field
continue
elif field.type == "upload":
# Find, rename and store the uploaded file
rowindex = item.get("_index", None)
if rowindex is not None:
filename = self._store_file(table, f, rowindex)
if filename:
values[f] = filename
elif isinstance(widget, S3Selector):
# Value must be processed by widget post-process
value, error = widget.postprocess(d["value"])
if not error:
values[f] = value
else:
valid = False
break
else:
# Must run through validator again (despite pre-validation)
# in order to post-process widget output properly (e.g. UTC
# offset subtraction)
try:
value, error = s3_validate(table, f, d["value"])
except AttributeError:
continue
if not error:
values[f] = value
else:
valid = False
break
if not valid:
# Skip invalid items
continue
record_id = item.get("_id")
if not record_id:
if delete:
# Item has been added and then removed again,
# so just ignore it
continue
elif not component.multiple or not multiple:
# Do not create a second record in this component
query = (resource._id == master_id) & \
component.get_join()
f = self._filterby_query()
if f is not None:
query &= f
DELETED = current.xml.DELETED
if DELETED in table.fields:
query &= table[DELETED] != True
row = db(query).select(table._id,
limitby = (0, 1),
).first()
if row:
record_id = row[table._id]
if record_id:
# Delete..?
if delete:
authorized = has_permission("delete", tablename, record_id)
if not authorized:
continue
c = s3db.resource(tablename, id=record_id)
# Audit happens inside .delete()
# Use cascade=True so that the deletion gets
# rolled back in case subsequent items fail:
success = c.delete(cascade=True, format="html")
# ...or update?
else:
authorized = has_permission("update", tablename, record_id)
if not authorized:
continue
query = (table._id == record_id)
success = db(query).update(**values)
values[table._id.name] = record_id
# Post-process update
if success:
audit("update", prefix, name,
record=record_id, representation=format)
# Update super entity links
s3db.update_super(table, values)
# Update realm
update_realm = s3db.get_config(table, "update_realm")
if update_realm:
auth.set_realm_entity(table, values,
force_update = True)
# Onaccept
onaccept(table, Storage(vars = values), method = "update")
else:
# Create a new record
authorized = has_permission("create", tablename)
if not authorized:
continue
# Get master record ID
pkey = component.pkey
mastertable = resource.table
if pkey != mastertable._id.name:
query = (mastertable._id == master_id)
master = db(query).select(mastertable._id,
mastertable[pkey],
limitby = (0, 1),
).first()
if not master:
return False
else:
master = Storage({pkey: master_id})
if actuate_link:
# Data are for component => apply component defaults
values = component.get_defaults(master,
defaults = defaults,
data = values,
)
if not actuate_link or not link:
# Add master record ID as linked directly
values[component.fkey] = master[pkey]
else:
# Check whether the component is a link table and
# we're linking to that via something like pr_person
# from hrm_human_resource
fkey = component.fkey
if fkey != "id" and fkey in component.fields and fkey not in values:
if fkey == "pe_id" and pkey == "person_id":
# Need to lookup the pe_id manually (bad that we need this
# special case, must be a better way but this works for now)
ptable = s3db.pr_person
query = (ptable.id == master[pkey])
person = db(query).select(ptable.pe_id,
limitby = (0, 1),
).first()
if person:
values["pe_id"] = person.pe_id
else:
current.log.debug("S3Forms: Cannot find person with ID: %s" % master[pkey])
elif resource.tablename == "pr_person" and \
fkey == "case_id" and pkey == "id":
# Using dvr_case as a link between pr_person & e.g. project_activity
# @ToDo: Work out generalisation & move to option if-possible
ltable = component.link.table
query = (ltable.person_id == master[pkey])
link_record = db(query).select(ltable.id,
limitby = (0, 1),
).first()
if link_record:
values[fkey] = link_record[pkey]
else:
current.log.debug("S3Forms: Cannot find case for person ID: %s" % master[pkey])
else:
values[fkey] = master[pkey]
# Create the new record
# use _table in case we are using an alias
try:
record_id = component._table.insert(**values)
except:
current.log.debug("S3Forms: Cannot insert values %s into table: %s" % (values, component._table))
raise
# Post-process create
if record_id:
# Ensure we're using the real table, not an alias
table = db[tablename]
# Audit
audit("create", prefix, name,
record = record_id,
representation = format,
)
# Add record_id
values[table._id.name] = record_id
# Update super entity link
s3db.update_super(table, values)
# Update link table
if link and actuate_link and \
options_get("update_link", True):
link.update_link(master, values)
# Set record owner
auth.s3_set_record_owner(table, record_id)
# onaccept
subform = Storage(vars = Storage(values))
onaccept(table, subform, method="create")
# Success
return True
else:
return False
# -------------------------------------------------------------------------
# Utility methods
# -------------------------------------------------------------------------
def _formname(self, separator=None):
"""
Generate a string representing the formname
@param separator: separator to prepend a prefix
"""
if separator:
return "%s%s%s%s" % (self.prefix,
separator,
self.alias,
self.selector,
)
else:
return "%s%s" % (self.alias, self.selector)
# -------------------------------------------------------------------------
def _layout(self):
""" Get the current layout """
layout = self.options.layout
if not layout:
layout = current.deployment_settings.get_ui_inline_component_layout()
elif isinstance(layout, type):
layout = layout()
return layout
# -------------------------------------------------------------------------
def _render_item(self,
table,
item,
fields,
readonly = True,
editable = False,
deletable = False,
multiple = True,
index = "none",
layout = None,
**attributes):
"""
Render a read- or edit-row.
@param table: the database table
@param item: the data
@param fields: the fields to render (list of strings)
@param readonly: render a read-row (otherwise edit-row)
@param editable: whether the record can be edited
@param deletable: whether the record can be deleted
@param multiple: whether multiple records can be added
@param index: the row index
@param layout: the subform layout (S3SQLSubFormLayout)
@param attributes: HTML attributes for the row
"""
s3 = current.response.s3
rowtype = "read" if readonly else "edit"
pkey = table._id.name
data = {}
formfields = []
formname = self._formname()
for f in fields:
# Construct a row-specific field name
fname = f["name"]
idxname = "%s_i_%s_%s_%s" % (formname, fname, rowtype, index)
# Parent and caller for add-popup
if not readonly:
# Use unaliased name to avoid need to create additional controllers
parent = original_tablename(table).split("_", 1)[1]
caller = "sub_%s_%s" % (formname, idxname)
popup = Storage(parent = parent,
caller = caller,
)
else:
popup = None
# Custom label
label = f.get("label", DEFAULT)
# Use S3UploadWidget for upload fields
if str(table[fname].type) == "upload":
widget = S3UploadWidget.widget
else:
widget = DEFAULT
# Get a Field instance for SQLFORM.factory
formfield = self._rename_field(table[fname],
idxname,
comments = False,
label = label,
popup = popup,
skip_post_validation = True,
widget = widget,
)
# Reduced options set?
if "filterby" in self.options:
options = self._filterby_options(fname)
if options:
if len(options) < 2:
requires = IS_IN_SET(options, zero=None)
else:
requires = IS_IN_SET(options)
formfield.requires = SKIP_POST_VALIDATION(requires)
# Get filterby-default
filterby_defaults = self._filterby_defaults()
if filterby_defaults and fname in filterby_defaults:
default = filterby_defaults[fname]["value"]
formfield.default = default
# Add the data for this field (for existing rows)
if index is not None and item and fname in item:
if formfield.type == "upload":
filename = item[fname]["value"]
if current.request.env.request_method == "POST":
if "_index" in item and item.get("_changed", False):
rowindex = item["_index"]
filename = self._store_file(table, fname, rowindex)
data[idxname] = filename
else:
value = item[fname]["value"]
if type(value) is str:
value = s3_str(value)
widget = formfield.widget
if isinstance(widget, S3Selector):
# Use the widget parser to get at the selected ID
value, error = widget.parse(value).get("id"), None
else:
# Use the validator to get at the original value
value, error = s3_validate(table, fname, value)
if error:
value = None
data[idxname] = value
formfields.append(formfield)
if not data:
data = None
elif pkey not in data:
data[pkey] = None
# Render the subform
subform_name = "sub_%s" % formname
rowstyle = layout.rowstyle_read if readonly else layout.rowstyle
subform = SQLFORM.factory(*formfields,
record = data,
showid = False,
formstyle = rowstyle,
upload = s3.download_url,
readonly = readonly,
table_name = subform_name,
separator = ":",
submit = False,
buttons = [],
)
subform = subform[0]
# Retain any CSS classes added by the layout
subform_class = subform["_class"]
subform.update(**attributes)
if subform_class:
subform.add_class(subform_class)
if multiple:
# Render row actions
layout.actions(subform,
formname,
index,
item = item,
readonly = readonly,
editable = editable,
deletable = deletable,
)
return subform
# -------------------------------------------------------------------------
def _filterby_query(self):
"""
Render the filterby-options as Query to apply when retrieving
the existing rows in this inline-component
"""
filterby = self.options["filterby"]
if not filterby:
return None
if not isinstance(filterby, (list, tuple)):
filterby = [filterby]
component = self.resource.components[self.selector]
table = component.table
query = None
for f in filterby:
fieldname = f["field"]
if fieldname not in table.fields:
continue
field = table[fieldname]
if "options" in f:
options = f["options"]
else:
continue
if "invert" in f:
invert = f["invert"]
else:
invert = False
if not isinstance(options, (list, tuple)):
if invert:
q = (field != options)
else:
q = (field == options)
else:
if invert:
q = (~(field.belongs(options)))
else:
q = (field.belongs(options))
if query is None:
query = q
else:
query &= q
return query
# -------------------------------------------------------------------------
def _filterby_defaults(self):
"""
Render the defaults for this inline-component as a dict
for the real-input JSON
"""
filterby = self.options.get("filterby")
if filterby is None:
return None
if not isinstance(filterby, (list, tuple)):
filterby = [filterby]
component = self.resource.components[self.selector]
table = component.table
defaults = dict()
for f in filterby:
fieldname = f["field"]
if fieldname not in table.fields:
continue
if "default" in f:
default = f["default"]
elif "options" in f:
options = f["options"]
if "invert" in f and f["invert"]:
continue
if isinstance(options, (list, tuple)):
if len(options) != 1:
continue
else:
default = options[0]
else:
default = options
else:
continue
if default is not None:
defaults[fieldname] = {"value": default}
return defaults
# -------------------------------------------------------------------------
def _filterby_options(self, fieldname):
"""
Re-render the options list for a field if there is a
filterby-restriction.
@param fieldname: the name of the field
"""
component = self.resource.components[self.selector]
table = component.table
if fieldname not in table.fields:
return None
field = table[fieldname]
filterby = self.options["filterby"]
if filterby is None:
return None
if not isinstance(filterby, (list, tuple)):
filterby = [filterby]
filter_fields = dict((f["field"], f) for f in filterby)
if fieldname not in filter_fields:
return None
filterby = filter_fields[fieldname]
if "options" not in filterby:
return None
# Get the options list for the original validator
requires = field.requires
if not isinstance(requires, (list, tuple)):
requires = [requires]
if requires:
r = requires[0]
if isinstance(r, IS_EMPTY_OR):
#empty = True
r = r.other
# Currently only supporting IS_IN_SET
if not isinstance(r, IS_IN_SET):
return None
else:
return None
r_opts = r.options()
# Get the filter options
options = filterby["options"]
if not isinstance(options, (list, tuple)):
options = [options]
subset = []
if "invert" in filterby:
invert = filterby["invert"]
else:
invert = False
# Compute reduced options list
for o in r_opts:
if invert:
if isinstance(o, (list, tuple)):
if o[0] not in options:
subset.append(o)
elif isinstance(r_opts, dict):
if o not in options:
subset.append((o, r_opts[o]))
elif o not in options:
subset.append(o)
else:
if isinstance(o, (list, tuple)):
if o[0] in options:
subset.append(o)
elif isinstance(r_opts, dict):
if o in options:
subset.append((o, r_opts[o]))
elif o in options:
subset.append(o)
return subset
# -------------------------------------------------------------------------
def _store_file(self, table, fieldname, rowindex):
"""
            Find, rename and store an uploaded file, and return its
new pathname
"""
field = table[fieldname]
formname = self._formname()
upload = "upload_%s_%s_%s" % (formname, fieldname, rowindex)
post_vars = current.request.post_vars
if upload in post_vars:
f = post_vars[upload]
if hasattr(f, "file"):
# Newly uploaded file (FieldStorage)
(sfile, ofilename) = (f.file, f.filename)
nfilename = field.store(sfile,
ofilename,
field.uploadfolder)
self.upload[upload] = nfilename
return nfilename
elif isinstance(f, str):
# Previously uploaded file
return f
return None
# =============================================================================
class S3SQLInlineLink(S3SQLInlineComponent):
"""
Subform to edit link table entries for the master record
Constructor options:
** Common options:
readonly..........True|False......render read-only always
multiple..........True|False......allow selection of multiple
options (default True)
widget............string..........which widget to use, one of:
- multiselect (default)
- groupedopts (default when cols is specified)
- hierarchy (requires hierarchical lookup-table)
- cascade (requires hierarchical lookup-table)
render_list.......True|False......in read-only mode, render HTML
list rather than comma-separated
strings (default False)
** Options for groupedopts widget:
cols..............integer.........number of columns for grouped
options (default: None)
orientation.......string..........orientation for grouped options
order, one of:
- cols
- rows
size..............integer.........maximum number of items per group
in grouped options, None to disable
grouping
sort..............True|False......sort grouped options (always True
when grouping, i.e. size!=None)
help_field........string..........additional field in the look-up
table to render as tooltip for
grouped options
table.............True|False......render grouped options as HTML
TABLE rather than nested DIVs
(default True)
** Options for multi-select widget:
header............True|False......multi-select to show a header with
bulk-select options and optional
search-field
search............True|False......show the search-field in the header
selectedList......integer.........how many items to show on multi-select
button before collapsing into number
noneSelectedText..string..........placeholder text on multi-select button
columns...........integer.........Foundation column-width for the
widget (for custom forms)
create............dict............Options to create a new record {"c": "controller",
"f": "function",
"label": "label",
"parent": "parent", (optional: which function to lookup options from)
"child": "child", (optional: which field to lookup options for)
}
** Options-filtering:
- multiselect and groupedopts only
- for hierarchy and cascade widgets, use the "filter" option
requires..........Validator.......validator to determine the
selectable options (defaults to
field validator)
filterby..........field selector..filter look-up options by this field
(can be a field in the look-up table
itself or in another table linked to it)
options...........value|list......filter for these values, or:
match.............field selector..lookup the filter value from this
field (can be a field in the master
table, or in linked table)
** Options for hierarchy and cascade widgets:
levels............list............ordered list of labels for hierarchy
levels (top-down order), to override
the lookup-table's "hierarchy_levels"
setting, cascade-widget only
represent.........callback........representation method for hierarchy
nodes (defaults to field represent)
leafonly..........True|False......only leaf nodes can be selected
cascade...........True|False......automatically select the entire branch
when a parent node is newly selected;
with multiple=False, this will
auto-select single child options
(default True when leafonly=True)
filter............resource query..filter expression to filter the
selectable options
"""
prefix = "link"
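    # Illustrative usage (sketch only - component and field names are
    # hypothetical):
    #
    #   S3SQLCustomForm("name",
    #                   S3SQLInlineLink("group",
    #                                   field = "group_id",
    #                                   label = T("Groups"),
    #                                   multiple = True,
    #                                   ),
    #                   )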
# -------------------------------------------------------------------------
def extract(self, resource, record_id):
"""
Get all existing links for record_id.
@param resource: the resource the record belongs to
@param record_id: the record ID
@return: list of component record IDs this record is
linked to via the link table
"""
self.resource = resource
component, link = self.get_link()
# Customise resources
from .s3rest import S3Request
r = S3Request(resource.prefix,
resource.name,
# Current request args/vars could be in a different
# resource context, so must override them here:
args = [],
get_vars = {},
)
customise_resource = current.deployment_settings.customise_resource
for tablename in (component.tablename, link.tablename):
customise = customise_resource(tablename)
if customise:
customise(r, tablename)
self.initialized = True
if record_id:
rkey = component.rkey
rows = link.select([rkey], as_rows=True)
if rows:
rkey = str(link.table[rkey])
values = [row[rkey] for row in rows]
else:
values = []
else:
# Use default
values = [link.table[self.options.field].default]
return values
# -------------------------------------------------------------------------
def __call__(self, field, value, **attributes):
"""
Widget renderer, currently supports multiselect (default),
hierarchy and groupedopts widgets.
@param field: the input field
@param value: the value to populate the widget
@param attributes: attributes for the widget
@return: the widget
"""
options = self.options
component, link = self.get_link()
has_permission = current.auth.s3_has_permission
ltablename = link.tablename
# User must have permission to create and delete
# link table entries (which is what this widget is about):
if options.readonly is True or \
not has_permission("create", ltablename) or \
not has_permission("delete", ltablename):
# Render read-only
return self.represent(value)
multiple = options.get("multiple", True)
options["multiple"] = multiple
# Field dummy
kfield = link.table[component.rkey]
dummy_field = Storage(name = field.name,
type = kfield.type,
label = options.label or kfield.label,
represent = kfield.represent,
)
# Widget type
widget = options.get("widget")
if widget not in ("hierarchy", "cascade"):
requires = options.get("requires")
if requires is None:
# Get the selectable entries for the widget and construct
# a validator from it
opts = self.get_options()
zero = options.get("zero", XML(" "))
if multiple or zero is not None:
# Drop the empty option
# - multiple does not need one (must de-select all instead)
# - otherwise, it shall be replaced by the zero-option
opts = {k: v for k, v in opts.items() if k != ""}
requires = IS_IN_SET(opts,
multiple = multiple,
zero = None if multiple else zero,
sort = options.get("sort", True),
)
if zero is not None:
# Allow deselecting all (or single: selection of explicit none)
# NB this is the default, unless zero is explicitly set to None
requires = IS_EMPTY_OR(requires)
dummy_field.requires = requires
# Helper to extract widget options
widget_opts = lambda keys: {k: v for k, v in options.items() if k in keys}
# Instantiate the widget
if widget == "groupedopts" or not widget and "cols" in options:
from .s3widgets import S3GroupedOptionsWidget
w_opts = widget_opts(("cols",
"help_field",
"multiple",
"orientation",
"size",
"sort",
"table",
))
w = S3GroupedOptionsWidget(**w_opts)
elif widget == "hierarchy":
from .s3widgets import S3HierarchyWidget
w_opts = widget_opts(("multiple",
"filter",
"leafonly",
"cascade",
"represent",
))
w_opts["lookup"] = component.tablename
w = S3HierarchyWidget(**w_opts)
elif widget == "cascade":
from .s3widgets import S3CascadeSelectWidget
w_opts = widget_opts(("levels",
"multiple",
"filter",
"leafonly",
"cascade",
"represent",
))
w_opts["lookup"] = component.tablename
w = S3CascadeSelectWidget(**w_opts)
else:
# Default to multiselect
from .s3widgets import S3MultiSelectWidget
w_opts = widget_opts(("multiple",
"search",
"header",
"selectedList",
"noneSelectedText",
"columns",
"create",
))
w = S3MultiSelectWidget(**w_opts)
# Render the widget
attr = dict(attributes)
attr["_id"] = field.name
if not link.table[options.field].writable:
_class = attr.get("_class", None)
if _class:
attr["_class"] = "%s hide" % _class
else:
attr["_class"] = "hide"
widget = w(dummy_field, value, **attr)
if hasattr(widget, "add_class"):
widget.add_class("inline-link")
# Append the attached script to jquery_ready
script = options.get("script")
if script:
current.response.s3.jquery_ready.append(script)
return widget
# -------------------------------------------------------------------------
def validate(self, form):
"""
Validate this link, currently only checking whether it has
a value when required=True
@param form: the form
"""
required = self.options.required
if not required:
return
fname = self._formname(separator="_")
values = form.vars.get(fname)
if not values:
error = current.T("Value Required") \
if required is True else required
form.errors[fname] = error
# -------------------------------------------------------------------------
def accept(self, form, master_id=None, format=None):
"""
Post-processes this subform element against the POST data,
            and creates/updates/deletes any related records.
@param form: the master form
@param master_id: the ID of the master record in the form
@param format: the data format extension (for audit)
@todo: implement audit
"""
s3db = current.s3db
# Name of the real input field
fname = self._formname(separator="_")
resource = self.resource
success = False
if fname in form.vars:
# Extract the new values from the form
values = form.vars[fname]
if values is None:
values = []
elif not isinstance(values, (list, tuple, set)):
values = [values]
values = set(str(v) for v in values)
# Get the link table
component, link = self.get_link()
# Get the master identity (pkey)
pkey = component.pkey
if pkey == resource._id.name:
master = {pkey: master_id}
else:
# Different pkey (e.g. super-key) => reload the master
query = (resource._id == master_id)
master = current.db(query).select(resource.table[pkey],
limitby = (0, 1),
).first()
if master:
# Find existing links
query = FS(component.lkey) == master[pkey]
lresource = s3db.resource(link.tablename,
filter = query,
)
rows = lresource.select([component.rkey], as_rows=True)
# Determine which to delete and which to add
if rows:
rkey = link.table[component.rkey]
current_ids = set(str(row[rkey]) for row in rows)
delete = current_ids - values
insert = values - current_ids
else:
delete = None
insert = values
# Delete links which are no longer used
# @todo: apply filterby to only delete within the subset?
if delete:
query &= FS(component.rkey).belongs(delete)
lresource = s3db.resource(link.tablename,
filter = query,
)
lresource.delete()
# Insert new links
insert.discard("")
if insert:
# Insert new links
for record_id in insert:
record = {component.fkey: record_id}
link.update_link(master, record)
success = True
return success
# -------------------------------------------------------------------------
def represent(self, value):
"""
Read-only representation of this subform.
@param value: the value as returned from extract()
@return: the read-only representation
"""
component, link = self.get_link()
# Use the represent of rkey if it supports bulk, otherwise
# instantiate an S3Represent from scratch:
rkey = link.table[component.rkey]
represent = rkey.represent
if not hasattr(represent, "bulk"):
# Pick the first field from the list that is available:
lookup_field = None
for fname in ("name", "tag"):
if fname in component.fields:
lookup_field = fname
break
from .s3fields import S3Represent
represent = S3Represent(lookup = component.tablename,
fields = [lookup_field],
)
# Represent all values
if isinstance(value, (list, tuple, set)):
result = represent.bulk(list(value))
if None not in value:
result.pop(None, None)
else:
result = represent.bulk([value])
# Sort them
def labels_sorted(labels):
try:
s = sorted(labels)
except TypeError:
if any(isinstance(l, DIV) for l in labels):
# Don't sort labels if they contain markup
s = labels
else:
s = sorted(s3_str(l) if l is not None else "-" for l in labels)
return s
labels = labels_sorted(result.values())
if self.options.get("render_list"):
if value is None or value == [None]:
# Don't render as list if empty
return current.messages.NONE
else:
# Render as HTML list
return UL([LI(l) for l in labels],
_class = "s3-inline-link",
)
else:
# Render as comma-separated list of strings
# (using TAG rather than join() to support HTML labels)
return TAG[""](list(chain.from_iterable([[l, ", "]
for l in labels]))[:-1])
# -------------------------------------------------------------------------
def get_options(self):
"""
Get the options for the widget
@return: dict {value: representation} of options
"""
resource = self.resource
component, link = self.get_link()
rkey = link.table[component.rkey]
# Lookup rkey options from rkey validator
opts = []
requires = rkey.requires
if not isinstance(requires, (list, tuple)):
requires = [requires]
if requires:
validator = requires[0]
if isinstance(validator, IS_EMPTY_OR):
validator = validator.other
try:
opts = validator.options()
except:
pass
# Filter these options?
widget_opts_get = self.options.get
filterby = widget_opts_get("filterby")
filteropts = widget_opts_get("options")
filterexpr = widget_opts_get("match")
if filterby and \
(filteropts is not None or filterexpr and resource._rows):
# filterby is a field selector for the component
# that shall match certain conditions
filter_selector = FS(filterby)
filter_query = None
if filteropts is not None:
# filterby-field shall match one of the given filteropts
if isinstance(filteropts, (list, tuple, set)):
filter_query = (filter_selector.belongs(list(filteropts)))
else:
filter_query = (filter_selector == filteropts)
elif filterexpr:
# filterby-field shall match one of the values for the
# filterexpr-field of the master record
rfield = resource.resolve_selector(filterexpr)
colname = rfield.colname
rows = resource.select([filterexpr], as_rows=True)
values = set(row[colname] for row in rows)
values.discard(None)
if values:
filter_query = (filter_selector.belongs(values)) | \
(filter_selector == None)
# Select the filtered component rows
filter_resource = current.s3db.resource(component.tablename,
filter = filter_query,
)
rows = filter_resource.select(["id"], as_rows=True)
filtered_opts = []
values = set(str(row[component.table._id]) for row in rows)
for opt in opts:
if str(opt[0]) in values:
filtered_opts.append(opt)
opts = filtered_opts
return dict(opts)
# -------------------------------------------------------------------------
def get_link(self):
"""
Find the target component and its linktable
@return: tuple of S3Resource instances (component, link)
"""
selector = self.selector
try:
component = self.resource.components[selector]
except KeyError:
raise SyntaxError("Undefined component: %s" % selector)
link = component.link
if not link:
# @todo: better error message
raise SyntaxError("No linktable for %s" % selector)
return (component, link)
# =============================================================================
class S3WithIntro(S3SQLFormElement):
"""
Wrapper for widgets to add an introductory text above them
"""
def __init__(self, widget, intro=None, cmsxml=False):
"""
Constructor
@param widget: the widget
@param intro: the intro, string|DIV|tuple,
if specified as tuple (module, resource, name),
the intro text will be looked up from CMS
@param cmsxml: do not XML-escape CMS contents, should only
be used with safe origin content (=normally never)
"""
self.widget = widget
self.intro = intro
self.cmsxml = cmsxml
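        # Illustrative configuration (hypothetical module/resource/post names):
        #   S3WithIntro(element, intro=("pr", "person", "SubscriptionIntro"))
        # would render the CMS post "SubscriptionIntro" registered for
        # module "pr" / resource "person" above the wrapped element's widget.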
# -------------------------------------------------------------------------
def resolve(self, resource):
"""
Override S3SQLFormElement.resolve() to map to widget
@param resource: the S3Resource to resolve this form element
against
"""
resolved = self.widget.resolve(resource)
field = resolved[2]
if field:
field.widget = self
return resolved
# -------------------------------------------------------------------------
def __getattr__(self, key):
"""
Attribute access => map to widget
@param key: the attribute key
"""
if key in self.__dict__:
return self.__dict__[key]
sentinel = object()
value = getattr(self.widget, key, sentinel)
if value is sentinel:
raise AttributeError
return value
# -------------------------------------------------------------------------
def __call__(self, *args, **kwargs):
"""
Widget renderer => map to widget, then add intro
"""
w = self.widget(*args, **kwargs)
intro = self.intro
if isinstance(intro, tuple):
if len(intro) == 3 and current.deployment_settings.has_module("cms"):
intro = self.get_cms_intro(intro)
else:
intro = None
if intro:
return TAG[""](DIV(intro,
_class = "s3-widget-intro",
), w)
else:
return w
# -------------------------------------------------------------------------
def get_cms_intro(self, intro):
"""
Get intro from CMS
@param intro: the intro spec as tuple (module, resource, postname)
"""
# Get intro text from CMS
db = current.db
s3db = current.s3db
ctable = s3db.cms_post
ltable = s3db.cms_post_module
join = ltable.on((ltable.post_id == ctable.id) & \
(ltable.module == intro[0]) & \
(ltable.resource == intro[1]) & \
(ltable.deleted == False))
query = (ctable.name == intro[2]) & \
(ctable.deleted == False)
row = db(query).select(ctable.body,
join = join,
cache = s3db.cache,
limitby = (0, 1),
).first()
if not row:
return None
return XML(row.body) if self.cmsxml else row.body
# END =========================================================================
|
{
"content_hash": "733150805f0bf1cc429ee4614a35628f",
"timestamp": "",
"source": "github",
"line_count": 4454,
"max_line_length": 147,
"avg_line_length": 37.63807813201616,
"alnum_prop": 0.44738725841088045,
"repo_name": "flavour/eden",
"id": "5364f7d1b2bc5f2c8dc396c232f460ee9df4aa1c",
"size": "167665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/s3/s3forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "727"
},
{
"name": "CSS",
"bytes": "3351335"
},
{
"name": "HTML",
"bytes": "1367727"
},
{
"name": "JavaScript",
"bytes": "20109418"
},
{
"name": "NSIS",
"bytes": "3934"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Python",
"bytes": "31407527"
},
{
"name": "Ruby",
"bytes": "8291"
},
{
"name": "Shell",
"bytes": "5059"
},
{
"name": "XSLT",
"bytes": "3274119"
}
],
"symlink_target": ""
}
|
r"""Implement a MSM class that builds a Markov state models from
microstate trajectories automatically computes important properties
and provides them for later access.
.. moduleauthor:: F. Noe <frank DOT noe AT fu-berlin DOT de>
"""
__docformat__ = "restructuredtext en"
import numpy as _np
from pyemma.msm.models.hmsm import HMSM as _HMSM
from pyemma._base.model import SampledModel as _SampledModel
from pyemma.util.types import is_iterable
class SampledHMSM(_HMSM, _SampledModel):
def __init__(self, samples, ref=None, conf=0.95):
r""" Constructs a sampled HMSM
Parameters
----------
samples : list of HMSM
Sampled HMSM objects
ref : HMSM
Single-point estimator, e.g. containing a maximum likelihood HMSM.
If not given, the sample mean will be used.
conf : float, optional, default=0.95
            Confidence interval (default: 0.95). Use 0.954 for two sigma or
            0.997 for three sigma.
"""
# validate input
        assert is_iterable(samples), 'samples must be a list of HMSM objects, but is not.'
        assert isinstance(samples[0], _HMSM), 'samples must be a list of HMSM objects, but is not.'
# construct superclass 1
_SampledModel.__init__(self, samples, conf=conf)
# construct superclass 2
if ref is None:
Pref = self.sample_mean('P')
pobsref = self.sample_mean('pobs')
_HMSM.__init__(self, Pref, pobsref, dt_model=samples[0].dt_model)
else:
_HMSM.__init__(self, ref.Pref, ref.pobs, dt_model=ref.dt_model)
# TODO: maybe rename to parametrize in order to avoid confusion with set_params that has a different behavior?
def set_model_params(self, samples=None, conf=0.95,
P=None, pobs=None, pi=None, reversible=None, dt_model='1 step', neig=None):
"""
Parameters
----------
samples : list of MSM objects
sampled MSMs
        conf : float, optional, default=0.95
            Confidence interval. Use 0.954 for two sigma or 0.997 for three sigma.
"""
# set model parameters of superclass
_SampledModel.set_model_params(self, samples=samples, conf=conf)
_HMSM.set_model_params(self, P=P, pobs=pobs, pi=pi, reversible=reversible, dt_model=dt_model, neig=neig)
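    # Minimal usage sketch (illustrative only; assumes `hmsm_samples` is a
    # list of pre-estimated HMSM model objects, which is not constructed here):
    #
    #   sampled = SampledHMSM(samples=hmsm_samples, conf=0.95)
    #   P_mean = sampled.sample_mean('P')        # mean hidden transition matrix
    #   pobs_mean = sampled.sample_mean('pobs')  # mean output probabilities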
# def _do_sample_eigendecomposition(self):
# """Conducts the eigenvalue decompositions for all sampled matrices.
#
# Stores all eigenvalues, left and right eigenvectors for all sampled matrices
#
# """
# from pyemma.msm.analysis import rdl_decomposition
# from pyemma.util import linalg
#
# # left eigenvectors
# self._sample_Ls = _np.empty((self._nsamples, self._nstates, self._nstates), dtype=float)
# # eigenvalues
# self._sample_eigenvalues = _np.empty((self._nsamples, self._nstates), dtype=float)
# # right eigenvectors
# self._sample_Rs = _np.empty((self._nsamples, self._nstates, self._nstates), dtype=float)
#
# for i in range(self._nsamples):
# if self._reversible:
# R, D, L = rdl_decomposition(self._sample_Ps[i], norm='reversible')
# # everything must be real-valued
# R = R.real
# D = D.real
# L = L.real
# else:
# R, D, L = rdl_decomposition(self._sample_Ps[i], norm='standard')
# # assign ordered
# I = linalg.match_eigenvectors(self.eigenvectors_right, R,
# w_ref=self.stationary_distribution, w=self._sample_mus[i])
# self._sample_Ls[i, :, :] = L[I, :]
# self._sample_eigenvalues[i, :] = _np.diag(D)[I]
# self._sample_Rs[i, :, :] = R[:, I]
#
# def set_confidence(self, conf):
# self._confidence = conf
#
# @property
# def nsamples(self):
# r""" Number of samples """
# return self._nsamples
#
# @property
# def confidence_interval(self):
# r""" Confidence interval used """
# return self._confidence
#
# @property
# def stationary_distribution_samples(self):
# r""" Samples of the initial distribution """
# return self._sample_mus
#
# @property
# def stationary_distribution_mean(self):
# r""" The mean of the initial distribution of the hidden states """
# return _np.mean(self.stationary_distribution_samples, axis=0)
#
# @property
# def stationary_distribution_std(self):
# r""" The standard deviation of the initial distribution of the hidden states """
# return _np.std(self.stationary_distribution_samples, axis=0)
#
# @property
# def stationary_distribution_conf(self):
# r""" The confidence interval of the initial distribution of the hidden states """
# return confidence_interval(self.stationary_distribution_samples, alpha=self._confidence)
#
# @property
# def transition_matrix_samples(self):
# r""" Samples of the transition matrix """
# return self._sample_Ps
#
# @property
# def transition_matrix_mean(self):
# r""" The mean of the transition_matrix of the hidden states """
# return _np.mean(self.transition_matrix_samples, axis=0)
#
# @property
# def transition_matrix_std(self):
# r""" The standard deviation of the transition_matrix of the hidden states """
# return _np.std(self.transition_matrix_samples, axis=0)
#
# @property
# def transition_matrix_conf(self):
# r""" The confidence interval of the transition_matrix of the hidden states """
# return confidence_interval(self.transition_matrix_samples, alpha=self._confidence)
#
# @property
# def output_probabilities_samples(self):
# r""" Samples of the output probability matrix """
# return self._sample_pobs
#
# @property
# def output_probabilities_mean(self):
# r""" The mean of the output probability matrix """
# return _np.mean(self.output_probabilities_samples, axis=0)
#
# @property
# def output_probabilities_std(self):
# r""" The standard deviation of the output probability matrix """
# return _np.std(self.output_probabilities_samples, axis=0)
#
# @property
# def output_probabilities_conf(self):
# r""" The standard deviation of the output probability matrix """
# return confidence_interval(self.output_probabilities_samples, alpha=self._confidence)
#
# @property
# def eigenvalues_samples(self):
# r""" Samples of the eigenvalues """
# return self._sample_eigenvalues
#
# @property
# def eigenvalues_mean(self):
# r""" The mean of the eigenvalues of the hidden states """
# return _np.mean(self.eigenvalues_samples, axis=0)
#
# @property
# def eigenvalues_std(self):
# r""" The standard deviation of the eigenvalues of the hidden states """
# return _np.std(self.eigenvalues_samples, axis=0)
#
# @property
# def eigenvalues_conf(self):
# r""" The confidence interval of the eigenvalues of the hidden states """
# return confidence_interval(self.eigenvalues_samples, alpha=self._confidence)
#
# @property
# def eigenvectors_left_samples(self):
# r""" Samples of the left eigenvectors of the hidden transition matrix """
# return self._sample_Ls
#
# @property
# def eigenvectors_left_mean(self):
# r""" The mean of the left eigenvectors of the hidden transition matrix """
# return _np.mean(self.eigenvectors_left_samples, axis=0)
#
# @property
# def eigenvectors_left_std(self):
# r""" The standard deviation of the left eigenvectors of the hidden transition matrix """
# return _np.std(self.eigenvectors_left_samples, axis=0)
#
# @property
# def eigenvectors_left_conf(self):
# r""" The confidence interval of the left eigenvectors of the hidden transition matrix """
# return confidence_interval(self.eigenvectors_left_samples, alpha=self._confidence)
#
# @property
# def eigenvectors_right_samples(self):
# r""" Samples of the right eigenvectors of the hidden transition matrix """
# return self._sample_Rs
#
# @property
# def eigenvectors_right_mean(self):
# r""" The mean of the right eigenvectors of the hidden transition matrix """
# return _np.mean(self.eigenvectors_right_samples, axis=0)
#
# @property
# def eigenvectors_right_std(self):
# r""" The standard deviation of the right eigenvectors of the hidden transition matrix """
# return _np.std(self.eigenvectors_right_samples, axis=0)
#
# @property
# def eigenvectors_right_conf(self):
# r""" The confidence interval of the right eigenvectors of the hidden transition matrix """
# return confidence_interval(self.eigenvectors_right_samples, alpha=self._confidence)
#
# @property
# def timescales_samples(self):
# r""" Samples of the timescales """
# return -self.lagtime / _np.log(_np.abs(self._sample_eigenvalues[:,1:]))
#
# @property
# def timescales_mean(self):
# r""" The mean of the timescales of the hidden states """
# return _np.mean(self.timescales_samples, axis=0)
#
# @property
# def timescales_std(self):
# r""" The standard deviation of the timescales of the hidden states """
# return _np.std(self.timescales_samples, axis=0)
#
# @property
# def timescales_conf(self):
# r""" The confidence interval of the timescales of the hidden states """
# return confidence_interval(self.timescales_samples, alpha=self._confidence)
#
# @property
# def lifetimes_samples(self):
# r""" Samples of the lifetimes """
# res = _np.empty((self.nsamples, self.nstates), dtype=float)
# for i in range(self.nsamples):
# res[i,:] = -self._lag / _np.log(_np.diag(self._sample_Ps[i]))
# return res
#
# @property
# def lifetimes_mean(self):
# r""" The mean of the lifetimes of the hidden states """
# return _np.mean(self.lifetimes_samples, axis=0)
#
# @property
# def lifetimes_std(self):
# r""" The standard deviation of the lifetimes of the hidden states """
# return _np.std(self.lifetimes_samples, axis=0)
#
# @property
# def lifetimes_conf(self):
# r""" The confidence interval of the lifetimes of the hidden states """
# return confidence_interval(self.lifetimes_samples, alpha=self._confidence)
|
{
"content_hash": "9b685a86184ba279831336a322cf0dfc",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 120,
"avg_line_length": 39.81818181818182,
"alnum_prop": 0.6125114155251141,
"repo_name": "arokem/PyEMMA",
"id": "27fafe628ab356282b4cda00d75043d30d03467e",
"size": "12365",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "pyemma/msm/models/hmsm_sampled.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "39352"
},
{
"name": "Python",
"bytes": "1138398"
}
],
"symlink_target": ""
}
|
from core_serializers import fields, serializers
from core_serializers.utils import BasicObject
import pytest
class HTMLDict(dict):
"""
A mock MultiDict that can be used for representing HTML input.
"""
getlist = None
class TestField:
def setup(self):
self.field = fields.Field()
def test_validate(self):
"""
By default a field should simply return the data it validates.
"""
assert self.field.validate(123) == 123
def test_validate_no_data(self):
"""
By default a field should raise a ValidationError if no data is
passed to it when validating.
"""
with pytest.raises(fields.ValidationError):
assert self.field.validate()
def test_serialize(self):
"""
By default a field should simply return the data it serializes.
"""
assert self.field.to_primative(123) == 123
class TestNotRequired:
def setup(self):
class TestSerializer(serializers.Serializer):
optional = fields.IntegerField(required=False)
mandatory = fields.IntegerField()
self.Serializer = TestSerializer
def test_validate_read_only(self):
"""
Non-required fields may be omitted in validation.
"""
data = {'mandatory': 123}
serializer = self.Serializer(data=data)
assert serializer.is_valid()
assert serializer.validated_data == {'mandatory': 123}
class TestReadOnly:
def setup(self):
class TestSerializer(serializers.Serializer):
read_only = fields.Field(read_only=True)
writable = fields.IntegerField()
self.serializer = TestSerializer()
def test_validate_read_only(self):
"""
Read-only fields should not be included in validation.
"""
data = {'read_only': 123, 'writable': 456}
validated = self.serializer.validate(data)
assert validated == {'writable': 456}
def test_serialize_read_only(self):
"""
Read-only fields should be serialized.
"""
obj = BasicObject(read_only=123, writable=456)
data = self.serializer.to_primative(obj)
assert data == {'read_only': 123, 'writable': 456}
class TestWriteOnly:
def setup(self):
class TestSerializer(serializers.Serializer):
write_only = fields.IntegerField(write_only=True)
readable = fields.IntegerField()
self.serializer = TestSerializer()
def test_validate_write_only(self):
"""
Write-only fields should be included in validation.
"""
data = {'write_only': 123, 'readable': 456}
validated = self.serializer.validate(data)
assert validated == {'write_only': 123, 'readable': 456}
def test_serialize_write_only(self):
"""
Write-only fields should not be serialized.
"""
obj = BasicObject(write_only=123, readable=456)
data = self.serializer.to_primative(obj)
assert data == {'readable': 456}
class TestDefault:
def setup(self):
class TestSerializer(serializers.Serializer):
default = fields.IntegerField(default=123)
no_default = fields.IntegerField()
self.serializer = TestSerializer()
def test_validate_default(self):
"""
A default value should be used if no value is passed in validation.
"""
data = {'no_default': 456}
validated = self.serializer.validate(data)
assert validated == {'default': 123, 'no_default': 456}
def test_validate_default_not_used(self):
"""
A default value should not be used if a value is passed in validation.
"""
data = {'default': 0, 'no_default': 456}
validated = self.serializer.validate(data)
assert validated == {'default': 0, 'no_default': 456}
class TestInitial:
def setup(self):
class TestSerializer(serializers.Serializer):
initial_field = fields.IntegerField(initial=123)
blank_field = fields.IntegerField()
self.serializer = TestSerializer()
def test_initial(self):
"""
Initial values should be included when serializing a new representation.
"""
assert self.serializer.data == {
'initial_field': 123,
'blank_field': None
}
class TestLabel:
def setup(self):
class TestSerializer(serializers.Serializer):
labeled = fields.IntegerField(label='My label')
self.serializer = TestSerializer()
def test_label(self):
"""
A field's label may be set with the `label` argument.
"""
fields = self.serializer.fields
assert fields['labeled'].label == 'My label'
class TestInvalidErrorKey:
def setup(self):
class ExampleField(serializers.Field):
def to_native(self, data):
self.fail('incorrect')
self.field = ExampleField()
def test_invalid_error_key(self):
"""
If a field raises a validation error, but does not have a corresponding
error message, then raise an appropriate assertion error.
"""
with pytest.raises(AssertionError) as exc_info:
self.field.to_native(123)
expected = (
'ValidationError raised by `ExampleField`, but error key '
'`incorrect` does not exist in the `MESSAGES` dictionary.'
)
assert str(exc_info.value) == expected
class TestBooleanHTMLInput:
def setup(self):
class TestSerializer(serializers.Serializer):
archived = fields.BooleanField()
self.serializer = TestSerializer()
def test_empty_html_checkbox(self):
"""
HTML checkboxes do not send any value, but should be treated
as `False` by BooleanField.
"""
data = HTMLDict()
validated = self.serializer.validate(data)
assert validated == {'archived': False}
class TestMethodField:
def setup(self):
class TestSerializer(serializers.Serializer):
example_method_field = fields.MethodField()
def get_example_method_field(self, instance):
return repr(instance)
self.serializer = TestSerializer()
def test_method_field(self):
obj = serializers.BasicObject(a=1)
assert self.serializer.to_primative(obj) == {
'example_method_field': "<BasicObject 'a': 1>"
}
|
{
"content_hash": "d0f14e678bc1f4bd13709cc2bc4ac048",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 80,
"avg_line_length": 31.25358851674641,
"alnum_prop": 0.6114513165952236,
"repo_name": "pombredanne/core-serializers",
"id": "2a9697df5c5226544ae2334690379f72a418c5a5",
"size": "6532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_fields.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "10167"
},
{
"name": "Python",
"bytes": "57906"
}
],
"symlink_target": ""
}
|
""" Copyright (c) 2014-2021 Geir Skjotskift
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import datetime
from dataclasses import dataclass
from typing import Any, Dict, Iterator, List, Optional, Text, Tuple
import requests
import RashlyOutlaid
import RashlyOutlaid.libwhois
@dataclass
class ASNRecord:
"""Dataclass containing the ASNRecord"""
asn: str
prefix: str
asname: str
cn: str
isp: str
peers: List[str]
def astuple(self) -> Tuple:
"""return the fields as a tuple"""
return (self.asn, self.prefix, self.asname, self.cn, self.isp, self.peers)
def __iter__(self) -> Iterator:
"""iterate over the tuple"""
yield from self.astuple()
@dataclass
class AVRecord:
"""Dataclass continating the AVRecord"""
md5: Optional[str]
vendor: str
signature: str
timestamp: Optional[datetime.datetime]
def astuple(self) -> Tuple:
"""return the fields as a tuple"""
return (self.md5, self.vendor, self.signature, self.timestamp)
def __iter__(self) -> Iterator:
"""iterator over the tuple"""
yield from self.astuple()
@dataclass
class MalwareRecord:
timestamp: Optional[datetime.datetime]
first_seen: Optional[datetime.datetime]
last_seen: Optional[datetime.datetime]
type: str
sha256: str
md5: str
sha1: str
pehash: str
tlsh: str
import_hash: str
entropy: str
filesize: str
adobe_malware_classifier: str
magic: str
anti_virus: List[AVRecord]
def astuple(self) -> Tuple:
"""return the fields as a tuple"""
return (
self.timestamp,
self.first_seen,
self.last_seen,
self.type,
self.sha256,
self.md5,
self.sha1,
self.pehash,
self.tlsh,
self.import_hash,
self.entropy,
self.filesize,
self.adobe_malware_classifier,
self.magic,
self.anti_virus,
)
def __iter__(self) -> Iterator:
"""iterate over the tuple"""
yield from self.astuple()
def parse_shadowserver_time(time_string: Text) -> Optional[datetime.datetime]:
"""Parse a date on the format '2018-10-17 20:36:23'"""
if not time_string:
return None
try:
return datetime.datetime.strptime(time_string[:19], "%Y-%m-%d %H:%M:%S")
except:
print(time_string)
raise
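# Illustrative behaviour of the helper above:
#   parse_shadowserver_time("2018-10-17 20:36:23")
#       -> datetime.datetime(2018, 10, 17, 20, 36, 23)
#   parse_shadowserver_time("") -> None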
def malware(hashes: List[Text], **kwargs_requests: Any) -> List[MalwareRecord]:
"""Lookup the list of hashes using the Shadowserver malware API
    You can pass arguments to requests such as proxies:
api.malware(["4b21f25e02b0d1df86ab745f82e140ab1cc498af"],
proxies={'http': 'http://myproxy.example.com:8080',
'https': 'http://myproxy.example.com:8080'
})
https://www.shadowserver.org/what-we-do/network-reporting/api-asn-and-network-queries/
"""
url = f"https://api.shadowserver.org/malware/info" f'?sample={",".join(hashes)}'
res = requests.get(url, **kwargs_requests)
if res.status_code != 200:
msg = (
f"RashlyOutlaid.api.malware could not lookup {hashes}. "
f"Got status='{res.status_code}' while requesting '{url}'"
)
raise RashlyOutlaid.libwhois.QueryError(msg)
ss_data: Dict = res.json()
return [
MalwareRecord(
parse_shadowserver_time(elem.get("timestamp", "")),
parse_shadowserver_time(elem.get("first_seen", "")),
parse_shadowserver_time(elem.get("last_seen", "")),
elem.get("type"),
elem.get("sha256"),
elem.get("md5"),
elem.get("sha1"),
elem.get("pehash"),
elem.get("tlsh"),
elem.get("import_hash"),
elem.get("entropic"),
elem.get("filesize"),
elem.get("adobe_malware_classifier"),
elem.get("magic"),
[
AVRecord(
x.get("md5"),
x.get("vendor"),
x.get("signature"),
parse_shadowserver_time(x.get("timestamp", "")),
)
for x in elem["anti_virus"]
],
)
for elem in ss_data
]
def _map_shadowserver_model(ssdata: List[Dict]) -> List[ASNRecord]:
"""Map the result from shadowserver to a list of ASNRecords"""
return [
ASNRecord(
x.get("asn", ""),
x.get("prefix", ""),
x.get("asname_short", ""),
x.get("geo", ""),
x.get("asname_long", ""),
x.get("peer", "").split(),
)
for x in ssdata
]
def origin(ip_addresses: List, **kwargs_requests: Any) -> List[ASNRecord]:
"""Lookup the list of ip addresses vs the Shadowserver origin web api
https://www.shadowserver.org/what-we-do/network-reporting/api-asn-and-network-queries/
    You can pass arguments to requests such as proxies:
api.origin(["8.8.8.8"],
proxies={'http': 'http://myproxy.example.com:8080',
'https': 'http://myproxy.example.com:8080'
})
"""
url = f"https://api.shadowserver.org/net/asn" f"?origin={','.join(ip_addresses)}"
res = requests.get(url, **kwargs_requests)
if res.status_code != 200:
msg = (
f"RashlyOutlaid.api.origin could not lookup origin. "
f"Got status='{res.status_code}' while requesting '{url}'"
)
raise RashlyOutlaid.libwhois.QueryError(msg)
ss_data: List[Dict] = res.json()
return _map_shadowserver_model(ss_data)
def peer(ip_addresses: List, **kwargs_requests: Any) -> List[ASNRecord]:
"""Lookup the list of ip addresses vs the Shadowserver peer web api
https://www.shadowserver.org/what-we-do/network-reporting/api-asn-and-network-queries/
    You can pass arguments to requests such as proxies:
api.peer(["8.8.8.8"],
proxies={'http': 'http://myproxy.example.com:8080',
'https': 'http://myproxy.example.com:8080'
})
"""
url = f"https://api.shadowserver.org/net/asn" f"?peer={','.join(ip_addresses)}"
res = requests.get(url, **kwargs_requests)
if res.status_code != 200:
msg = (
f"RashlyOutlaid.api.peer could not lookup peers "
f"of {ip_addresses}. "
f"Got status='{res.status_code}' while requesting '{url}'"
)
raise RashlyOutlaid.libwhois.QueryError(msg)
ss_data: List[Dict] = res.json()
return _map_shadowserver_model(ss_data)
def asn(asnumber: int, **kwargs_requests) -> List[ASNRecord]:
"""Lookup the asn via the Shadowserver asn web api
https://www.shadowserver.org/what-we-do/network-reporting/api-asn-and-network-queries/
    You can pass arguments to requests such as proxies:
api.asn(12345,
proxies={'http': 'http://myproxy.example.com:8080',
'https': 'http://myproxy.example.com:8080'
})
"""
url = f"https://api.shadowserver.org/net/asn" f"?query={asnumber}"
res = requests.get(url, **kwargs_requests)
if res.status_code != 200:
msg = (
f"RashlyOutlaid.api.asn could not lookup asn {asnumber}. "
f"Got status='{res.status_code}' while requesting '{url}'"
)
raise RashlyOutlaid.libwhois.QueryError(msg)
ss_data: List[Dict] = [res.json()]
return _map_shadowserver_model(ss_data)
def prefix(asnumber: int, **kwargs_requests: Any) -> List[Text]:
"""Lookup the list of ip addresses vs the Shadowserver prefix web api
https://www.shadowserver.org/what-we-do/network-reporting/api-asn-and-network-queries/
    You can pass arguments to requests such as proxies:
api.prefix(12345,
proxies={'http': 'http://myproxy.example.com:8080',
'https': 'http://myproxy.example.com:8080'
})
"""
url = f"https://api.shadowserver.org/net/asn" f"?prefix={asnumber}"
res = requests.get(url, **kwargs_requests)
if res.status_code != 200:
msg = (
f"RashlyOutlaid.api.prefix could not lookup asn {asnumber}. "
f"Got status='{res.status_code}' while requesting '{url}'"
)
raise RashlyOutlaid.libwhois.QueryError(msg)
ss_data: List[Text] = res.json()
return ss_data
|
{
"content_hash": "db3dd474597cbeeeb685c6a11f7ede09",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 90,
"avg_line_length": 30.784565916398712,
"alnum_prop": 0.5975558805097138,
"repo_name": "bunzen/RashlyOutlaid",
"id": "7b1e7bc27162e8ef077bce85a3d302d4d470de96",
"size": "9574",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RashlyOutlaid/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33813"
}
],
"symlink_target": ""
}
|
from collections import namedtuple
import re
from GitSavvy.core.git_command import mixin_base
Stash = namedtuple("Stash", ("id", "description"))
class StashMixin(mixin_base):
def get_stashes(self):
"""
Return a list of stashes in the repo.
"""
stdout = self.git("stash", "list")
stashes = []
for entry in stdout.split("\n"):
if not entry:
continue
match = re.match("^stash@\\{(\\d+)}: (.*?: )?(.*)", entry)
assert match
num, _, description = match.groups()
stashes.append(Stash(num, description))
return stashes
def show_stash(self, id):
stash_name = "stash@{{{}}}".format(id)
return self.git("stash", "show", "--no-color", "-p", stash_name)
def apply_stash(self, id):
"""
Apply stash with provided id.
"""
self.git("stash", "apply", "stash@{{{}}}".format(id))
def pop_stash(self, id):
"""
Pop stash with provided id.
"""
self.git("stash", "pop", "stash@{{{}}}".format(id))
def create_stash(self, description, include_untracked=False):
"""
Create stash with provided description from working files.
"""
self.git("stash", "save", "-k", "-u" if include_untracked else None, description)
def drop_stash(self, id):
"""
Drop stash with provided id.
"""
return self.git("stash", "drop", "stash@{{{}}}".format(id))
|
{
"content_hash": "925e101c0f6e8792ec58df59254669ac",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 89,
"avg_line_length": 28.660377358490567,
"alnum_prop": 0.5306122448979592,
"repo_name": "divmain/GitSavvy",
"id": "1a08fb31e7a2d4c3ecc924e15e7111e3eaabadd1",
"size": "1519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/git_mixins/stash.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "118"
},
{
"name": "HTML",
"bytes": "13504"
},
{
"name": "Python",
"bytes": "646248"
},
{
"name": "Shell",
"bytes": "2011"
}
],
"symlink_target": ""
}
|
'''
============================================
General vs. Additive Unscented Kalman Filter
============================================
This example shows the difference between the general Unscented Kalman Filter
and the Additive Unscented Kalman Filter.
The general Unscented Kalman Filter (UKF) places no limitations on how noise
interacts with the model. While this provides a wider range of potential
models, it comes at the cost of additional computational burden. If your model
contains additive noise, the Additive Unscented Kalman Filter (AddUKF) allows
one to exploit that to reduce computational complexity. While results are not
guaranteed to be the same for both methods, they are very similar.
The figure drawn shows the true, hidden state; the state estimates given by the
UKF; and finally the same given by the AddUKF.
'''
import numpy as np
import pylab as pl
from pykalman import AdditiveUnscentedKalmanFilter, UnscentedKalmanFilter
# initialize parameters
def transition_function(state, noise):
a = state[0] * np.sin(state[1]) + noise[0]
b = state[1] + noise[1]
return np.array([a, b])
def observation_function(state, noise):
C = np.array([[-1, 0.5], [0.2, 0.1]])
return np.dot(C, state) + noise
def additive_transition_function(state):
return transition_function(state, np.array([0, 0]))
def additive_observation_function(state):
return observation_function(state, np.array([0, 0]))
transition_covariance = np.eye(2)
random_state = np.random.RandomState(0)
observation_covariance = np.eye(2) + random_state.randn(2, 2) * 0.1
initial_state_mean = [0, 0]
initial_state_covariance = [[1, 0.1], [ 0.1, 1]]
# sample from model
ukf = UnscentedKalmanFilter(
transition_function, observation_function,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance,
random_state=random_state
)
akf = AdditiveUnscentedKalmanFilter(
additive_transition_function, additive_observation_function,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance
)
states, observations = ukf.sample(50, initial_state_mean)
# estimate state with filtering
ukf_state_estimates = ukf.filter(observations)[0]
akf_state_estimates = akf.filter(observations)[0]
# draw estimates
pl.figure()
lines_true = pl.plot(states, color='b')
lines_ukf = pl.plot(ukf_state_estimates, color='r', ls='-')
lines_akf = pl.plot(akf_state_estimates, color='g', ls='-.')
pl.legend((lines_true[0], lines_ukf[0], lines_akf[0]),
('true', 'UKF', 'AddUKF'),
loc='upper left'
)
pl.show()
|
{
"content_hash": "e98131ec441cb87a37d0213bcb87bb0b",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 79,
"avg_line_length": 36.18055555555556,
"alnum_prop": 0.7124760076775432,
"repo_name": "nils-werner/pykalman",
"id": "cd1c5b2039c3121d280d5fefa2359492dd45911c",
"size": "2605",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "examples/unscented/plot_additive.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Matlab",
"bytes": "4456"
},
{
"name": "Python",
"bytes": "239762"
}
],
"symlink_target": ""
}
|
import numpy as np
import scipy.sparse as sp
import pytest
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
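# The reference implementation above follows the Passive-Aggressive updates of
# Crammer et al. (2006): for hinge losses the step size is min(C, loss/||x||^2)
# (PA-I), for the squared variants it is loss/(||x||^2 + 1/(2C)) (PA-II).
# Minimal illustrative fit on the binarised iris labels defined above:
#
#   y_bin = np.where(y == 1, 1, -1)
#   ref = MyPassiveAggressive(loss="hinge", n_iter=2)
#   ref.fit(X, y_bin)
#   preds = np.sign(ref.project(X))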
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
for average in (False, True):
clf = PassiveAggressiveClassifier(
C=1.0, max_iter=30, fit_intercept=fit_intercept,
random_state=1, average=average, tol=None)
clf.fit(data, y)
score = clf.score(data, y)
assert score > 0.79
if average:
assert hasattr(clf, '_average_coef')
assert hasattr(clf, '_average_intercept')
assert hasattr(clf, '_standard_intercept')
assert hasattr(clf, '_standard_coef')
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
for average in (False, True):
clf = PassiveAggressiveClassifier(random_state=0,
average=average,
max_iter=5)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert score > 0.79
if average:
assert hasattr(clf, '_average_coef')
assert hasattr(clf, '_average_intercept')
assert hasattr(clf, '_standard_intercept')
assert hasattr(clf, '_standard_coef')
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier(max_iter=5).fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
@pytest.mark.parametrize('loss', ("hinge", "squared_hinge"))
def test_classifier_correctness(loss):
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPassiveAggressive(loss=loss, n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(loss=loss, max_iter=2,
shuffle=False, tol=None)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier(max_iter=100)
for meth in ("predict_proba", "predict_log_proba", "transform"):
with pytest.raises(AttributeError):
getattr(clf, meth)
def test_class_weights():
# Test class weights.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(C=0.1, max_iter=100, class_weight=None,
random_state=100)
clf.fit(X2, y2)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
    # we give a small weight to class 1
clf = PassiveAggressiveClassifier(C=0.1, max_iter=100,
class_weight={1: 0.001},
random_state=100)
clf.fit(X2, y2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_partial_fit_weight_class_balanced():
# partial_fit with class_weight='balanced' not supported
clf = PassiveAggressiveClassifier(class_weight="balanced", max_iter=100)
with pytest.raises(ValueError):
clf.partial_fit(X, y, classes=np.unique(y))
def test_equal_class_weight():
X2 = [[1, 0], [1, 0], [0, 1], [0, 1]]
y2 = [0, 0, 1, 1]
clf = PassiveAggressiveClassifier(
C=0.1, tol=None, class_weight=None)
clf.fit(X2, y2)
# Already balanced, so "balanced" weights should have no effect
clf_balanced = PassiveAggressiveClassifier(
C=0.1, tol=None, class_weight="balanced")
clf_balanced.fit(X2, y2)
clf_weighted = PassiveAggressiveClassifier(
C=0.1, tol=None, class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X2, y2)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
assert_almost_equal(clf.coef_, clf_balanced.coef_, decimal=2)
def test_wrong_class_weight_label():
# ValueError due to wrong class_weight label.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight={0: 0.5}, max_iter=100)
with pytest.raises(ValueError):
clf.fit(X2, y2)
def test_wrong_class_weight_format():
# ValueError due to wrong class_weight argument type.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight=[0.5], max_iter=100)
with pytest.raises(ValueError):
clf.fit(X2, y2)
clf = PassiveAggressiveClassifier(class_weight="the larch", max_iter=100)
with pytest.raises(ValueError):
clf.fit(X2, y2)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
for average in (False, True):
reg = PassiveAggressiveRegressor(
C=1.0, fit_intercept=fit_intercept,
random_state=0, average=average, max_iter=5)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert np.mean((pred - y_bin) ** 2) < 1.7
if average:
assert hasattr(reg, '_average_coef')
assert hasattr(reg, '_average_intercept')
assert hasattr(reg, '_standard_intercept')
assert hasattr(reg, '_standard_coef')
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for average in (False, True):
reg = PassiveAggressiveRegressor(random_state=0,
average=average, max_iter=100)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert np.mean((pred - y_bin) ** 2) < 1.7
if average:
assert hasattr(reg, '_average_coef')
assert hasattr(reg, '_average_intercept')
assert hasattr(reg, '_standard_intercept')
assert hasattr(reg, '_standard_coef')
@pytest.mark.parametrize(
'loss',
("epsilon_insensitive", "squared_epsilon_insensitive"))
def test_regressor_correctness(loss):
y_bin = y.copy()
y_bin[y != 1] = -1
reg1 = MyPassiveAggressive(loss=loss, n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(tol=None, loss=loss, max_iter=2,
shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor(max_iter=100)
for meth in ("transform",):
with pytest.raises(AttributeError):
getattr(reg, meth)
# TODO: remove in 1.0
@pytest.mark.parametrize('klass', [PassiveAggressiveClassifier,
PassiveAggressiveRegressor])
def test_passive_aggressive_deprecated_attr(klass):
est = klass(average=True)
est.fit(X, y)
msg = "Attribute {} was deprecated"
for att in ['average_coef_', 'average_intercept_',
'standard_coef_', 'standard_intercept_']:
with pytest.warns(FutureWarning, match=msg.format(att)):
getattr(est, att)
|
{
"content_hash": "e3cef6db0cf2544953bbd0733d4966f0",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 77,
"avg_line_length": 34.89160839160839,
"alnum_prop": 0.5605772121455056,
"repo_name": "glemaitre/scikit-learn",
"id": "d0d099eeacc8d34e3d51f651f180cb5bd6e11b03",
"size": "9979",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "sklearn/linear_model/tests/test_passive_aggressive.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2232"
},
{
"name": "C",
"bytes": "41025"
},
{
"name": "C++",
"bytes": "146835"
},
{
"name": "Makefile",
"bytes": "1711"
},
{
"name": "Python",
"bytes": "10011694"
},
{
"name": "Shell",
"bytes": "44168"
}
],
"symlink_target": ""
}
|
import time
import unittest
from selenium import webdriver
import settings
class ModifyStudentInfo(unittest.TestCase):
def setUp(self):
self.driver = None
self.base_url = settings.test_parameters.get("education_base_url")
def test_E83_modify_the_students_information(self):
web_types = settings.test_parameters.get("web_types")
for web_type in web_types:
if web_type == 'firefox':
self.driver = webdriver.Firefox()
elif web_type == 'chrome':
self.driver = webdriver.Chrome()
self.driver.implicitly_wait(30)
driver = self.driver
driver.get(self.base_url)
driver.maximize_window()
###########################################
            # Precondition: log in to the system and create test data
###########################################
driver.find_element_by_id("input_username").clear()
driver.find_element_by_id("input_username").send_keys(settings.test_parameters.get("admin_username"))
driver.find_element_by_id("input_password").clear()
driver.find_element_by_id("input_password").send_keys(settings.test_parameters.get("admin_password"))
driver.find_element_by_id("login_btn").click()
time.sleep(5)
driver.find_element_by_link_text(u"用户管理").click()
time.sleep(1)
driver.find_element_by_link_text(u"学生").click()
time.sleep(3)
driver.find_element_by_id("create_user").click()
time.sleep(2)
driver.find_element_by_id("username").clear()
driver.find_element_by_id("username").send_keys("student01")
driver.find_element_by_id("fullname").clear()
driver.find_element_by_id("fullname").send_keys("student01")
driver.find_element_by_id("email").clear()
driver.find_element_by_id("email").send_keys("student01@vinzor.com")
driver.find_element_by_id("password").clear()
driver.find_element_by_id("password").send_keys("123456")
driver.find_element_by_id("confirm").clear()
driver.find_element_by_id("confirm").send_keys("123456")
time.sleep(3)
driver.find_element_by_id("confirm_action").click()
time.sleep(5)
###########################################
            # Step 1: leave name and email blank, check the error prompts
###########################################
driver.find_element_by_css_selector("input.form-control.input-sm").clear()
driver.find_element_by_css_selector("input.form-control.input-sm").send_keys("student01")
time.sleep(5)
if web_type == 'firefox':
driver.find_element_by_xpath("/html/body/div[2]/div[2]/div/div/"
"div[2]/div[2]/div[2]/div/table/tbody/tr/td[6]/div/a[1]").click()
elif web_type == 'chrome':
element = driver.find_element_by_xpath("/html/body/div[2]/div[2]/div/div/"
"div[2]/div[2]/div[2]/div/table/tbody/tr/td[6]/div/a[1]")
webdriver.ActionChains(driver).move_to_element(element).click().perform()
time.sleep(3)
self.assertEqual('true', driver.find_element_by_id("username").get_attribute("readonly"))
driver.find_element_by_id("fullname").clear()
driver.find_element_by_id("email").clear()
driver.find_element_by_id("confirm_action").click()
time.sleep(2)
self.assertEqual("请输入姓名", driver.find_element_by_id("fullname-error").text)
self.assertEqual("请输入邮件地址", driver.find_element_by_id("email-error").text)
###########################################
            # Step 2: enter an over-long name and email, check the error prompts
###########################################
driver.find_element_by_id("fullname").clear()
driver.find_element_by_id("fullname").\
send_keys("1234567890123456789012345678901234567890123456789012345678901234567890")
driver.find_element_by_id("email").clear()
driver.find_element_by_id("email").\
send_keys("12345678901234567890123456789012345678901234567890@12345678901234567.com")
driver.find_element_by_id("confirm_action").click()
time.sleep(2)
self.assertEqual("长度不超过64个字", driver.find_element_by_id("fullname-error").text)
self.assertEqual("长度1-64个字", driver.find_element_by_id("email-error").text)
###########################################
            # Step 3: enter an email in an invalid format, check the error prompt
###########################################
driver.find_element_by_id("email").clear()
driver.find_element_by_id("email").send_keys("test@@dfd.com")
driver.find_element_by_id("confirm_action").click()
time.sleep(2)
self.assertEqual("请输入有效的电子邮件地址", driver.find_element_by_id("email-error").text)
###########################################
            # Step 4: email already used by another user, check the error prompt
###########################################
driver.find_element_by_id("fullname").clear()
driver.find_element_by_id("fullname").send_keys("vinzor")
driver.find_element_by_id("email").clear()
driver.find_element_by_id("email").send_keys("student@example.com")
driver.find_element_by_id("confirm_action").click()
time.sleep(2)
self.assertEqual("邮件地址已被使用", driver.find_element_by_id("email-error").text)
###########################################
            # Step 5: valid input, edit succeeds
###########################################
driver.find_element_by_id("email").clear()
driver.find_element_by_id("email").send_keys("test11@vinzor.com")
driver.find_element_by_id("confirm_action").click()
time.sleep(3)
self.assertEqual("用户 student01 信息修改成功", driver.find_element_by_class_name("gritter-without-image").
find_element_by_tag_name("p").text)
###########################################
            # Postcondition: clean up the created user to allow repeated test runs
###########################################
driver.find_element_by_css_selector("input.form-control.input-sm").clear()
driver.find_element_by_css_selector("input.form-control.input-sm").send_keys("student01")
time.sleep(5)
if web_type == 'firefox':
driver.find_element_by_id("select_all").click()
elif web_type == 'chrome':
element = driver.find_element_by_id("select_all")
webdriver.ActionChains(driver).move_to_element(element).click().perform()
time.sleep(3)
driver.find_element_by_id("delete_user").click()
time.sleep(3)
driver.find_element_by_id("confirm_delete").click()
time.sleep(3)
driver.quit()
def tearDown(self):
self.driver.quit()
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "abb88e1d364b37e0d2e108a5ad8edf6d",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 113,
"avg_line_length": 53.37956204379562,
"alnum_prop": 0.5114180226993026,
"repo_name": "sysuwuhaibin/vatus",
"id": "6bea60c293f113a65bce7b4eb28e9977ec54d2cc",
"size": "7684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vatus/testcases/test_education/test_user_management/test_E83_modify_the_students_information.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "214309"
}
],
"symlink_target": ""
}
|
import sys
import os
import shutil
import datetime
import glob
import zipfile
import argparse
import re
QLXL = "QuantLibXL"
VERSION = "1.8.0"
VERSION_ = "1_8_0"
#VC_VERSION = "vc120"
VC_VERSION = "vc90"
QLXL_VERSION = QLXL + "-" + VERSION
ROOT_DIR = QLXL_VERSION + "\\"
class ZipFile:
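    """Thin wrapper around zipfile.ZipFile that prefixes every archive
    entry with a common root directory (see zip() below)."""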
root = None
zipFile = None
def __init__(self, path, root):
self.root = root
self.zipFile = zipfile.ZipFile(path, "w", zipfile.ZIP_DEFLATED)
def __del__(self):
self.zipFile.close()
def zip(self, sourcePath, targetPath = None):
print sourcePath
if targetPath is None:
targetPath = self.root + sourcePath
self.zipFile.write(sourcePath, targetPath)
def zipGlob(self, path, excludeFiles = None):
for fileName in glob.glob(path):
if excludeFiles is not None:
for r in excludeFiles:
if r.match(fileName):
continue
print fileName
self.zip(fileName)
class Selector:
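    """Walks inputPath and zips the selected files: directories are pruned
    via the incDirs/excDirs regex lists, file names are filtered via
    incFiles/excFiles, and exclusion takes precedence over inclusion."""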
zipFile = None
inputPath = None
incDirs = None
excDirs = None
incFiles = None
excFiles = None
def __init__(self, zipFile, inputPath, incDirs=None, excDirs=None, incFiles=None, excFiles=None):
self.zipFile = zipFile
self.inputPath = inputPath
self.incDirs = incDirs
self.excDirs = excDirs
self.incFiles = incFiles
self.excFiles = excFiles
self.process()
def process(self):
for root, dirs, files in os.walk(self.inputPath):
root += "\\"
for d in reversed(dirs):
if self.excludeDir(d):
dirs.remove(d)
continue
for f in files:
if self.includeFile(f):
self.zipFile.zip(root + f)
def excludeDir(self, d):
if self.excDirs is not None:
for r in self.excDirs:
if r.match(d):
return True
if self.incDirs is None:
return False
else:
for r in self.incDirs:
if r.match(d):
return False
return True
def includeFile(self, f):
if self.excFiles is not None:
for r in self.excFiles:
if r.match(f):
return False
if self.incFiles is None:
return True
else:
for r in self.incFiles:
if r.match(f):
return True
return False
def prompt_exit(msg='', status=0):
if msg:
print msg
if sys.platform == 'win32':
raw_input('press any key to exit')
sys.exit(status)
#DELETEME
def visit(params, dirname, names):
zfile = params[0]
exclude = params[1]
strip = params[2]
if strip:
rootDir = dirname[len(strip):]
else:
rootDir = dirname
for name in names:
if exclude == name: continue
sourcePath = dirname + "/" + name
targetPath = rootDir + "/" + name
zfile.write(sourcePath, ROOT_DIR + targetPath)
def makeZipStatic():
zipFilePath = "zip/%s-%s.zip" % (QLXL_VERSION, "RateCurveFramework")
zfile = zipfile.ZipFile(zipFilePath, "w", zipfile.ZIP_DEFLATED)
# Zip up some specific files from the QuantLibXL directory.
zfile.write("Docs/QuantLibXL-docs-" + VERSION + ".chm", ROOT_DIR + "Docs/QuantLibXL-docs-" + VERSION + ".chm")
zfile.write("xll/QuantLibXL-" + VC_VERSION + "-mt-s-" + VERSION_ + ".xll", ROOT_DIR + "xll/QuantLibXL-" + VC_VERSION + "-mt-s-" + VERSION_ + ".xll")
zfile.write("zip/README.txt", ROOT_DIR + "README.txt")
# Recursively zip some subdirectories of the QuantLibXL directory.
#os.path.walk("Data", visit, (zfile, ".gitignore", None))
os.path.walk("Data2/XLS", visit, (zfile, ".gitignore", None))
os.path.walk("framework", visit, (zfile, "ReadMe.txt", None))
#os.path.walk("Workbooks", visit, (zfile, None, None))
# Zip up some files from other projects in the repo.
os.path.walk("../QuantLibAddin/gensrc/metadata", visit, (zfile, None, "../QuantLibAddin/gensrc/"))
zfile.write("../XL-Launcher/bin/Addin/Launcher.xla", ROOT_DIR + "Launcher.xla")
for fileName in glob.glob("../XL-Launcher/bin/Addin/session_file.*-s-*.xml"):
baseName = os.path.basename(fileName)
if -1 != baseName.find("-dev") or -1 != baseName.find("-x64"): continue
zfile.write("../XL-Launcher/bin/Addin/" + baseName, ROOT_DIR + baseName)
for fileName in glob.glob("../XL-Launcher/bin/Addin/session_file.*-s-*.bat"):
baseName = os.path.basename(fileName)
if -1 != baseName.find("-dev") or -1 != baseName.find("-x64"): continue
zfile.write("../XL-Launcher/bin/Addin/" + baseName, ROOT_DIR + baseName)
zfile.close()
def makeZipStaticX64():
zipFilePath = "zip/%s-%s-%s.zip" % (QLXL_VERSION, "x64", "RateCurveFramework")
zfile = zipfile.ZipFile(zipFilePath, "w", zipfile.ZIP_DEFLATED)
# Zip up some specific files from the QuantLibXL directory.
zfile.write("Docs/QuantLibXL-docs-" + VERSION + ".chm", ROOT_DIR + "Docs/QuantLibXL-docs-" + VERSION + ".chm")
zfile.write("xll/QuantLibXL-" + VC_VERSION + "-x64-mt-s-" + VERSION_ + ".xll", ROOT_DIR + "xll/QuantLibXL-" + VC_VERSION + "-x64-mt-s-" + VERSION_ + ".xll")
zfile.write("zip/README.txt", ROOT_DIR + "README.txt")
# Recursively zip some subdirectories of the QuantLibXL directory.
#os.path.walk("Data", visit, (zfile, ".gitignore", None))
os.path.walk("Data2/XLS", visit, (zfile, ".gitignore", None))
os.path.walk("framework", visit, (zfile, "ReadMe.txt", None))
#os.path.walk("Workbooks", visit, (zfile, None, None))
# Zip up some files from other projects in the repo.
os.path.walk("../QuantLibAddin/gensrc/metadata", visit, (zfile, None, "../QuantLibAddin/gensrc/"))
zfile.write("../XL-Launcher/bin/Addin/Launcher.xla", ROOT_DIR + "Launcher.xla")
zfile.write("../XL-Launcher/bin/Addin/README.txt", ROOT_DIR + "README-session_files.txt")
for fileName in glob.glob("../XL-Launcher/bin/Addin/session_file.*x64-s-*.xml"):
baseName = os.path.basename(fileName)
if -1 != baseName.find("-dev"): continue
zfile.write("../XL-Launcher/bin/Addin/" + baseName, ROOT_DIR + baseName)
for fileName in glob.glob("../XL-Launcher/bin/Addin/session_file.*x64-s-*.bat"):
baseName = os.path.basename(fileName)
if -1 != baseName.find("-dev"): continue
zfile.write("../XL-Launcher/bin/Addin/" + baseName, ROOT_DIR + baseName)
zfile.close()
def zipBinaryFiles(zipFile):
#zipFile.zip("zip\\README.txt", zipFile.root + "README.txt")
zipFile.zip("xll\\QuantLibXL-" + VC_VERSION + "-mt-s-" + VERSION_ + ".xll")
zipFile.zip("xll\\QuantLibXL-" + VC_VERSION + "-x64-mt-s-" + VERSION_ + ".xll")
#zipFile.zip("Docs\\QuantLibXL-docs-" + VERSION + ".chm")
Selector(
inputPath = 'Workbooks',
zipFile = zipFile,
incFiles = (
re.compile(r'^.*\.TXT$'),
re.compile(r'^.*\.bat$'),
re.compile(r'^.*\.xlsm$'),
re.compile(r'^.*\.xlam$'),
re.compile(r'^.*\.xlsx$'),),
)
def zipFrameworkFiles(zipFile):
zipFile.zip("../XL-Launcher/bin/Addin/Launcher.xla", zipFile.root + "Launcher.xla")
zipFile.zip("../XL-Launcher/bin/Addin/session_file.public.live.xml", zipFile.root + "session_file.xml")
zipFile.zip("../XL-Launcher/bin/Addin/session_file.public.live.bat", zipFile.root + "session_file.public.live.bat")
zipFile.zip("../XL-Launcher/bin/Addin/session_file.public.live.xml", zipFile.root + "session_file.public.live.xml")
Selector(
inputPath = 'Data2',
zipFile = zipFile,
)
Selector(
inputPath = 'framework',
zipFile = zipFile,
)
def zipSourceFiles(zipFile):
zipFile.zipGlob("*.sln")
#zipFile.zip("Docs\\Makefile.vc")
#zipFile.zip("Docs\\quantlibxl.doxy")
#zipFile.zipGlob("Docs\\*.css")
#zipFile.zipGlob("Docs\\*.html")
#zipFile.zipGlob("Docs\\*.vcproj")
#zipFile.zipGlob("Docs\\*.vcxproj")
#zipFile.zipGlob("Docs\\images\\*.bmp")
#zipFile.zipGlob("Docs\\images\\*.ico")
#zipFile.zipGlob("Docs\\images\\*.jpg")
#zipFile.zipGlob("Docs\\images\\*.png")
#zipFile.zipGlob("Docs\\pages\\*.docs")
Selector(
inputPath = 'qlxl',
zipFile = zipFile,
excDirs = (
re.compile('^build.*'),),
excFiles = (
re.compile(r'^\.gitignore$'),
re.compile(r'^Makefile\.am$'),
re.compile(r'^.*\.user$'),
re.compile(r'^.*\.filters$')),
)
def makeZipBinary():
zipFile = ZipFile("zip/" + QLXL_VERSION + "-bin.zip", ROOT_DIR)
zipBinaryFiles(zipFile)
def makeZipFramework():
zipFile = ZipFile("zip/" + QLXL_VERSION + "-framework.zip", ROOT_DIR)
zipBinaryFiles(zipFile)
zipFrameworkFiles(zipFile)
def makeZipSource():
zipFile = ZipFile("zip/" + QLXL_VERSION + ".zip", QLXL + "\\")
zipSourceFiles(zipFile)
parser = argparse.ArgumentParser(description='zip up QuantLibXL')
parser.add_argument('-t','--target', help='target environment', required=True)
args = vars(parser.parse_args())
if 'binary' == args['target']:
makeZipBinary()
elif 'framework' == args['target']:
makeZipFramework()
elif 'source' == args['target']:
makeZipSource()
elif 'static' == args['target']:
makeZipStatic()
elif 'staticX64' == args['target']:
makeZipStaticX64()
else:
print "Error - unsupported target : " + args['target']
raw_input('press any key to exit')
|
{
"content_hash": "e56c50f19a5a7576790a9118bf5bcf34",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 160,
"avg_line_length": 36.221804511278194,
"alnum_prop": 0.5981318111053451,
"repo_name": "eehlers/QuantLibXL",
"id": "d2dadc1ee40afb951163f3424a3646f136771a1d",
"size": "9636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "make_zip.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2158"
},
{
"name": "C++",
"bytes": "23275"
},
{
"name": "CSS",
"bytes": "843"
},
{
"name": "HTML",
"bytes": "8757"
},
{
"name": "JavaScript",
"bytes": "1125"
},
{
"name": "M4",
"bytes": "2169"
},
{
"name": "Makefile",
"bytes": "500"
},
{
"name": "Python",
"bytes": "9636"
},
{
"name": "Shell",
"bytes": "257"
}
],
"symlink_target": ""
}
|
"""Factory methods for Random Forest baseline agent."""
import dataclasses
import chex
from neural_testbed import base as testbed_base
import numpy as np
from sklearn import ensemble
@dataclasses.dataclass
class RandomForestConfig:
n_estimators: int = 100 # Number of elements in random forest
criterion: str = 'gini' # Splitting criterion 'gini' or 'entropy'
def make_agent(config: RandomForestConfig) -> testbed_base.TestbedAgent:
"""Factory method to create a random forest agent."""
def random_forest_agent(
data: testbed_base.Data,
prior: testbed_base.PriorKnowledge,
) -> testbed_base.EpistemicSampler:
# sklearn cannot handle classes with no samples, so as a hack we add one
# fake sample for every class here.
new_x = np.zeros((prior.num_classes, data.x.shape[1]))
new_y = np.expand_dims(np.arange(prior.num_classes), axis=1)
data = testbed_base.Data(
np.concatenate((data.x, new_x), axis=0),
np.concatenate((data.y, new_y), axis=0))
random_forest = ensemble.RandomForestClassifier(
n_estimators=config.n_estimators, criterion=config.criterion)
random_forest.fit(data.x, np.ravel(data.y))
# Ensure that the number of classes is correct
# (this will fail if the fake data isn't added above)
assert len(random_forest.classes_) == prior.num_classes
def enn_sampler(x: chex.Array, seed: int = 0) -> chex.Array:
del seed # seed does not affect the random_forest agent.
probs = random_forest.predict_proba(x)
# Clip the probabilities away from 0 and 1, otherwise log(0) yields NaNs
# in the KL calculation.
probs = np.minimum(np.maximum(probs, 0.01), 0.99)
return np.log(probs) # return logits
return enn_sampler
return random_forest_agent
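# --- Illustrative usage sketch (not part of the original module) ---
# Assumes testbed_base.Data(x, y) and a PriorKnowledge carrying num_classes,
# as used above; everything else below is hypothetical.
#
#   config = RandomForestConfig(n_estimators=50, criterion='entropy')
#   agent = make_agent(config)
#   sampler = agent(data, prior)       # data: testbed_base.Data
#   logits = sampler(data.x, seed=0)   # shape [num_examples, num_classes]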
|
{
"content_hash": "244983b4191c979973b71d350f0a3c2f",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 77,
"avg_line_length": 36.12244897959184,
"alnum_prop": 0.7045197740112994,
"repo_name": "deepmind/neural_testbed",
"id": "9b101c41437273a290fe94b8617971241962f999",
"size": "2500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neural_testbed/agents/factories/random_forest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1121696"
},
{
"name": "Python",
"bytes": "378222"
},
{
"name": "Shell",
"bytes": "1700"
}
],
"symlink_target": ""
}
|
import sys
print(sys.version.split()[0]) # First part of string
|
{
"content_hash": "ba8f8215e17524d8a5201c880ba5d34c",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 56,
"avg_line_length": 34,
"alnum_prop": 0.6911764705882353,
"repo_name": "dreadrel/UWF_2014_spring_COP3990C-2507",
"id": "0b95335f1faabd63b06892e8c2a9238a4511e7cc",
"size": "78",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notebooks/scripts/book_code/code/what.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1796"
},
{
"name": "Python",
"bytes": "493591"
}
],
"symlink_target": ""
}
|
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Theanet'
copyright = '2015, rakeshvar'
author = 'rakeshvar'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Theanetdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Theanet.tex', 'Theanet Documentation',
'rakeshvar', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'theanet', 'Theanet Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Theanet', 'Theanet Documentation',
author, 'Theanet', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
{
"content_hash": "b00778d80ce62cbcd9dbf13bb65c2440",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 79,
"avg_line_length": 32.28044280442804,
"alnum_prop": 0.7048468221307728,
"repo_name": "ilyakava/theanet",
"id": "304e334a3fe5d75adb52033b49ccf12d9f3f808d",
"size": "9191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "52496"
}
],
"symlink_target": ""
}
|
from bs4 import BeautifulSoup
from urllib import request
PERSON_URL = "http://www.imdb.com/name/"
def get_movie_titles(imdb_tag):
"""This function scrapes the person data from imdb.com and then creates
a set of all their movies. It also gets the person's name.
"""
page = request.urlopen(PERSON_URL + imdb_tag)
soup = BeautifulSoup(page, "lxml")
person = soup.h1.span.string
print(person)
film_category = soup.find("div", "filmo-category-section")
movie_titles = []
for b in film_category.find_all("b"):
movie_titles.append(b.string)
return movie_titles
def get_shared_movies(one, two):
return [x for x in one if x in two]
def user_interface():
imdb_tag = input("Please enter the imdb tag of a person: ")
imdb_tag2 = input("Please enter the imdb tag of another person: ")
return imdb_tag, imdb_tag2
def main():
# Assign the input to variables
imdb_tag, imdb_tag2 = user_interface()
# Get the movie titles
person_one = get_movie_titles(imdb_tag) #nm0000120
person_two = get_movie_titles(imdb_tag2) #nm0001099
# Find the movies they share and print them out
shared_movies = get_shared_movies(person_one, person_two)
if shared_movies == []:
print("No shared movies found.")
else:
for movie in shared_movies:
print(movie)
if __name__ == "__main__":
main()
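# Illustrative session (using the example tags from the comments above;
# actual output depends on the live IMDb pages):
#   Please enter the imdb tag of a person: nm0000120
#   Please enter the imdb tag of another person: nm0001099
#   <each shared movie title printed on its own line>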
|
{
"content_hash": "8a47835f3de10f867a245919d92096c8",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 75,
"avg_line_length": 32.22727272727273,
"alnum_prop": 0.6382228490832158,
"repo_name": "corinennis/imdb_shared_movies",
"id": "5a713b207fcca982ccd67e0fe8982ec0c4b03c98",
"size": "1418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imdb_shared_movies/imdb_shared_movies.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1418"
}
],
"symlink_target": ""
}
|
calendar_events_mapping = {
'action': '/char/UpcomingCalendarEvents.xml.aspx',
'fields': ['eventID', 'ownerName', 'eventDate', 'eventTitle', 'duration',
'eventText']
}
contracts_mapping = {
'action': '/char/Contracts.xml.aspx',
'fields': ['contractID', 'startStationID', 'status', 'price']
}
contract_items_mapping = {
'action': '/char/ContractItems.xml.aspx',
'fields': ['typeID', 'quantity', 'included']
}
corp_contracts_mapping = {
'action': '/corp/Contracts.xml.aspx',
'fields': ['contractID', 'startStationID', 'type', 'status', 'price',
'title']
}
corp_contract_items_mapping = {
'action': '/corp/ContractItems.xml.aspx',
'fields': ['typeID', 'quantity', 'included']
}
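# Illustrative consumer (hypothetical; the real client code lives elsewhere
# in the repository): each mapping pairs an XML API endpoint with the row
# attributes to extract from the returned rowset, e.g.
#
#   url = API_ROOT + contracts_mapping['action']
#   rows = [dict((f, row.get(f)) for f in contracts_mapping['fields'])
#           for row in rowset]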
|
{
"content_hash": "e12f1f8c7e179b1d63f91494184ee4f7",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 77,
"avg_line_length": 28.807692307692307,
"alnum_prop": 0.6101468624833111,
"repo_name": "JudgeGregg/Twisted-EvE-API",
"id": "7b0aff458d6fee3dfb245607f8b3b8104635c13e",
"size": "749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mapping.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7798"
}
],
"symlink_target": ""
}
|
import unittest
from touchdown.config import expressions
from touchdown.core import serializers
class TestExpressions(unittest.TestCase):
def test_pwgen(self):
serializer = expressions.pwgen(length=31, symbols=True)
self.assertTrue(isinstance(serializer, serializers.Expression))
rendered = serializer.render(None, None)
self.assertEqual(len(rendered), 31)
def test_django_key(self):
serializer = expressions.django_secret_key()
self.assertTrue(isinstance(serializer, serializers.Expression))
rendered = serializer.render(None, None)
self.assertEqual(len(rendered), 50)
def test_fernet_key(self):
serializer = expressions.fernet_secret_key()
self.assertTrue(isinstance(serializer, serializers.Expression))
rendered = serializer.render(None, None)
self.assertEqual(len(rendered), 44)
def test_rsa_private_key(self):
serializer = expressions.rsa_private_key()
self.assertTrue(isinstance(serializer, serializers.Expression))
rendered = serializer.render(None, None).splitlines()
self.assertEqual(rendered[0], "-----BEGIN RSA PRIVATE KEY-----")
self.assertEqual(rendered[-1], "-----END RSA PRIVATE KEY-----")
def test_dsa_private_key(self):
serializer = expressions.dsa_private_key()
self.assertTrue(isinstance(serializer, serializers.Expression))
rendered = serializer.render(None, None).splitlines()
self.assertEqual(rendered[0], "-----BEGIN DSA PRIVATE KEY-----")
self.assertEqual(rendered[-1], "-----END DSA PRIVATE KEY-----")
|
{
"content_hash": "5af69a1a308834116622c7891f88ee30",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 72,
"avg_line_length": 42.89473684210526,
"alnum_prop": 0.6822085889570552,
"repo_name": "yaybu/touchdown",
"id": "4849a1c85ec5a9c14a228a49111375be0a60975f",
"size": "2208",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "touchdown/tests/test_config_expressions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "760"
},
{
"name": "Python",
"bytes": "1047173"
}
],
"symlink_target": ""
}
|
import json
from twisted.protocols.basic import LineReceiver
from twisted.internet import defer
class Request(object):
def __init__(self,protocol,method):
self.protocol = protocol
self.method = method
def __call__(self,*args,**kwargs):
return self.protocol.callRemote(self.method,*args,**kwargs)
def notify(self,*args,**kwargs):
return self.protocol.notifyRemote(self.method,*args,**kwargs)
class Response(Exception):
def __init__(self,value=None):
self.value = value
def format(self,ctx):
return {'jsonrpc':'2.0','result':self.value,'id':ctx}
class ProtocolException(Exception):
def __init__(self,code,message,data=None):
self.code = int(code)
self.message = unicode(message)
self.data = data
def __repr__(self):
return 'ProtocolException(%d,%s,%s)'%(self.code,repr(self.message),repr(self.data))
def format(self,ctx=None):
e = {'code':self.code,'message':self.message}
if self.data is not None:
e['data'] = self.data
return {'jsonrpc':'2.0','error':e,'id':ctx}
@staticmethod
def stringify(ex):
return json.dumps({'jsonrpc':'2.0','error':{'code':-32000,'message':ex.__class__.__name__,'data':str(ex)},'id':None},separators=(',',':'))
class ParseError(ProtocolException):
def __init__(self,message='Parse error.',data=None):
ProtocolException.__init__(self,-32700,message,data)
class InvalidRequest(ProtocolException):
def __init__(self,message='Invalid request.',data=None):
ProtocolException.__init__(self,-32600,message,data)
class MethodNotFound(ProtocolException):
def __init__(self,message='Method not found.',data=None):
ProtocolException.__init__(self,-32601,message,data)
class InvalidParams(ProtocolException):
def __init__(self,message='Invalid parameters.',data=None):
ProtocolException.__init__(self,-32602,message,data)
class InternalError(ProtocolException):
def __init__(self,message='Internal error.',data=None):
ProtocolException.__init__(self,-32603,message,data)
class PythonError(ProtocolException):
def __init__(self,ex):
ProtocolException.__init__(self,-32000,ex.__class__.__name__)
class JsonRPCProtocol(LineReceiver):
delimiter = '\n'
def __init__(self):
self._buf = None
self._request = {}
self._deferred = {}
def lineReceived(self,line):
try:
try:
data = json.loads(line)
except ValueError:
data = (None,)
raise ParseError()
res = defer.maybeDeferred(self.jsonReceived,data)
res.addCallback(self.sendJson)
except ProtocolException,ex:
self.sendJson(ex.format(None))
def jsonReceived(self,data):
if isinstance(data,dict):
data_id = None
try:
if data.get('jsonrpc',None) != '2.0':
raise InvalidRequest()
if 'method' in data:
if 'id' in data:
data_id = data['id']
if not (isinstance(data_id,basestring) or isinstance(data_id,float) or isinstance(data_id,int) or data_id is None):
data_id = None
raise InvalidRequest()
else:
data_id = False
data_method = data['method']
if not isinstance(data_method,basestring):
raise InvalidRequest()
try:
fn = getattr(self,'jsonrpc_'+data_method)
except AttributeError:
raise MethodNotFound()
if 'params' in data:
data_params = data['params']
if isinstance(data_params,list):
try:
response = fn(*data_params)
except Exception,ex:
raise PythonError(ex)
elif isinstance(data_params,dict):
try:
response = fn(**data_params)
except Exception,ex:
raise PythonError(ex)
else:
raise InvalidRequest()
else:
try:
response = fn()
except Exception,ex:
raise PythonError(ex)
if data_id is not False:
raise Response(response)
elif 'result' in data:
if 'error' in data or 'id' not in data:
raise InvalidRequest()
data_id = data['id']
try:
deferred = self._deferred[data_id]
del self._deferred[data_id]
except KeyError:
return
deferred.callback(data['result'])
elif 'error' in data:
if 'result' in data or 'id' not in data:
raise InvalidRequest()
data_id = data['id']
try:
deferred = self._deferred[data_id]
del self._deferred[data_id]
except KeyError:
return
deferred.errback(data['error'])
except (Response,ProtocolException),ex:
if data_id is not False:
return ex.format(data_id)
elif isinstance(data,list):
@defer.inlineCallbacks
def runall():
responses = []
for request in data:
if isinstance(request,dict):
response = yield defer.maybeDeferred(self.jsonReceived,request)
if response is not None:
responses.append(response)
defer.returnValue(responses)
return runall()
def sendJson(self,data):
if isinstance(data,dict) or isinstance(data,list):
self.sendLine(json.dumps(data,separators=(',',':')))
def callRemote(self,_method,*args,**kwargs):
largs = len(args)
lkwargs = len(kwargs)
if largs > 0 and lkwargs > 0:
raise TypeError('cannot use both positional and named parameters')
elif largs > 0:
params = args
elif lkwargs > 0:
params = kwargs
else:
params = None
d = defer.Deferred()
try:
ctx = max(self._deferred)+1
except ValueError:
ctx = 0
o = {'jsonrpc':'2.0','method':_method,'id':ctx}
if params is not None:
o['params'] = params
self._deferred[ctx] = d
if isinstance(self._buf,list):
self._buf.append(o)
else:
self.sendJson(o)
return d
def notifyRemote(self,_method,*args,**kwargs):
largs = len(args)
lkwargs = len(kwargs)
if largs > 0 and lkwargs > 0:
raise TypeError('cannot use both positional and named parameters')
elif largs > 0:
params = args
elif lkwargs > 0:
params = kwargs
else:
params = None
o = {'jsonrpc':'2.0','method':_method}
if params is not None:
o['params'] = params
if isinstance(self._buf,list):
self._buf.append(o)
else:
self.sendJson(o)
return self
def beginQueue(self):
self.endQueue()
self._buf = []
return self
def endQueue(self):
if isinstance(self._buf,list):
self.sendJson(self._buf)
self._buf = None
return self
def __getattr__(self,key):
if key.startswith('_'):
raise AttributeError('%s instance has no attribute \'%s\''%(self.__class__.__name__,key))
try:
return self._request[key]
except KeyError:
self._request[key] = Request(self,key)
return self._request[key]
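# --- Illustrative sketch (not part of the original module) ---
# Handlers are dispatched by name as 'jsonrpc_' + method, and outgoing calls
# go through the Request proxies returned by __getattr__. A minimal echo
# service under Python 2 / Twisted might look like this (names below are
# hypothetical):
#
#   from twisted.internet import reactor
#   from twisted.internet.protocol import Factory
#
#   class EchoProtocol(JsonRPCProtocol):
#       def jsonrpc_echo(self, value):
#           return value
#
#   factory = Factory()
#   factory.protocol = EchoProtocol
#   reactor.listenTCP(7080, factory)
#   reactor.run()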
|
{
"content_hash": "a5068eb40dc29d1e1f8072e1338d57df",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 142,
"avg_line_length": 32.72222222222222,
"alnum_prop": 0.5977645727221279,
"repo_name": "johnny-die-tulpe/sauron",
"id": "021971335bbc9271c866c47ba455ffbb21fb8690",
"size": "8195",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sauron/utils/jsonrpc2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "105915"
},
{
"name": "Shell",
"bytes": "1897"
}
],
"symlink_target": ""
}
|
import logging
import os
import subprocess
import sys
import threading
import unittest
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
SWARMING_BOT_DIR = os.path.dirname(THIS_DIR)
import singleton
PYCODE = """
import sys
sys.path.insert(0, '%s')
from bot_code import singleton
print(singleton.Singleton(%r).acquire())
""" % (SWARMING_BOT_DIR, THIS_DIR)
CMD_ACQUIRE = [sys.executable, '-u', '-c', PYCODE]
class Test(unittest.TestCase):
# These tests fail when run by nose2 under Python 3.
# They pass when this script is run directly as an executable.
no_run = 1
def test_singleton_with(self):
with singleton.singleton(THIS_DIR) as s:
self.assertEqual(True, s)
def test_singleton_recursive(self):
with singleton.singleton(THIS_DIR) as s:
self.assertEqual(True, s)
with singleton.singleton(THIS_DIR) as s2:
self.assertEqual(False, s2)
with singleton.singleton(THIS_DIR) as s3:
self.assertEqual(False, s3)
def test_singleton_acquire(self):
f = singleton.Singleton(THIS_DIR)
try:
f.acquire()
finally:
f.release()
@unittest.skipIf(sys.platform == 'win32',
'TODO(crbug.com/1017545): fail subprocess')
def test_singleton_child(self):
logging.info('using command:\n%s', ' '.join(CMD_ACQUIRE))
with singleton.singleton(THIS_DIR):
pass
self.assertEqual(b'True\n', subprocess.check_output(CMD_ACQUIRE))
with singleton.singleton(THIS_DIR):
self.assertEqual(b'False\n', subprocess.check_output(CMD_ACQUIRE))
self.assertEqual(b'True\n', subprocess.check_output(CMD_ACQUIRE))
if __name__ == '__main__':
os.chdir(THIS_DIR)
if '-v' in sys.argv:
unittest.TestCase.maxDiff = None
logging.basicConfig(
level=logging.DEBUG if '-v' in sys.argv else logging.CRITICAL)
unittest.main()
|
{
"content_hash": "9217f2d18997778bed089f197a5d7580",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 72,
"avg_line_length": 27.696969696969695,
"alnum_prop": 0.6821663019693655,
"repo_name": "luci/luci-py",
"id": "02d1145e51897b0a5e855d30901973311afa1c91",
"size": "2026",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "appengine/swarming/swarming_bot/bot_code/singleton_test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5576"
},
{
"name": "HTML",
"bytes": "1900972"
},
{
"name": "JavaScript",
"bytes": "113046"
},
{
"name": "Makefile",
"bytes": "11718"
},
{
"name": "Python",
"bytes": "5885612"
},
{
"name": "Shell",
"bytes": "5183"
}
],
"symlink_target": ""
}
|
"""The tests for the Tasmota switch platform."""
import copy
import json
from unittest.mock import patch
from hatasmota.utils import (
get_topic_stat_result,
get_topic_tele_state,
get_topic_tele_will,
)
from homeassistant.components import switch
from homeassistant.components.tasmota.const import DEFAULT_PREFIX
from homeassistant.const import ATTR_ASSUMED_STATE, STATE_OFF, STATE_ON
from .test_common import (
DEFAULT_CONFIG,
help_test_availability,
help_test_availability_discovery_update,
help_test_availability_poll_state,
help_test_availability_when_connection_lost,
help_test_discovery_device_remove,
help_test_discovery_removal,
help_test_discovery_update_unchanged,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
)
from tests.common import async_fire_mqtt_message
from tests.components.switch import common
async def test_controlling_state_via_mqtt(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 1
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("switch.test")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON"}')
state = hass.states.get("switch.test")
assert state.state == STATE_ON
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"OFF"}')
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
async_fire_mqtt_message(hass, "tasmota_49A3BC/stat/RESULT", '{"POWER":"ON"}')
state = hass.states.get("switch.test")
assert state.state == STATE_ON
async_fire_mqtt_message(hass, "tasmota_49A3BC/stat/RESULT", '{"POWER":"OFF"}')
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
async def test_sending_mqtt_commands(hass, mqtt_mock, setup_tasmota):
"""Test the sending MQTT commands."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 1
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
await hass.async_block_till_done()
await hass.async_block_till_done()
mqtt_mock.async_publish.reset_mock()
# Turn the switch on and verify MQTT message is sent
await common.async_turn_on(hass, "switch.test")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Power1", "ON", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Tasmota is not optimistic, the state should still be off
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
# Turn the switch off and verify MQTT message is sent
await common.async_turn_off(hass, "switch.test")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/Power1", "OFF", 0, False
)
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
async def test_relay_as_light(hass, mqtt_mock, setup_tasmota):
"""Test relay does not show up as switch in light mode."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 1
config["so"]["30"] = 1 # Enforce Home Assistant auto-discovery as light
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("switch.test")
assert state is None
state = hass.states.get("light.test")
assert state is not None
async def test_availability_when_connection_lost(
hass, mqtt_client_mock, mqtt_mock, setup_tasmota
):
"""Test availability after MQTT disconnection."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 1
await help_test_availability_when_connection_lost(
hass, mqtt_client_mock, mqtt_mock, switch.DOMAIN, config
)
async def test_availability(hass, mqtt_mock, setup_tasmota):
"""Test availability."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 1
await help_test_availability(hass, mqtt_mock, switch.DOMAIN, config)
async def test_availability_discovery_update(hass, mqtt_mock, setup_tasmota):
"""Test availability discovery update."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 1
await help_test_availability_discovery_update(
hass, mqtt_mock, switch.DOMAIN, config
)
async def test_availability_poll_state(
hass, mqtt_client_mock, mqtt_mock, setup_tasmota
):
"""Test polling after MQTT connection (re)established."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 1
poll_topic = "tasmota_49A3BC/cmnd/STATE"
await help_test_availability_poll_state(
hass, mqtt_client_mock, mqtt_mock, switch.DOMAIN, config, poll_topic, ""
)
async def test_discovery_removal_switch(hass, mqtt_mock, caplog, setup_tasmota):
"""Test removal of discovered switch."""
config1 = copy.deepcopy(DEFAULT_CONFIG)
config1["rl"][0] = 1
config2 = copy.deepcopy(DEFAULT_CONFIG)
config2["rl"][0] = 0
await help_test_discovery_removal(
hass, mqtt_mock, caplog, switch.DOMAIN, config1, config2
)
async def test_discovery_removal_relay_as_light(hass, mqtt_mock, caplog, setup_tasmota):
"""Test removal of discovered relay as light."""
config1 = copy.deepcopy(DEFAULT_CONFIG)
config1["rl"][0] = 1
config1["so"]["30"] = 0 # Disable Home Assistant auto-discovery as light
config2 = copy.deepcopy(DEFAULT_CONFIG)
config2["rl"][0] = 1
config2["so"]["30"] = 1 # Enforce Home Assistant auto-discovery as light
await help_test_discovery_removal(
hass, mqtt_mock, caplog, switch.DOMAIN, config1, config2
)
async def test_discovery_update_unchanged_switch(
hass, mqtt_mock, caplog, setup_tasmota
):
"""Test update of discovered switch."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 1
with patch(
"homeassistant.components.tasmota.switch.TasmotaSwitch.discovery_update"
) as discovery_update:
await help_test_discovery_update_unchanged(
hass, mqtt_mock, caplog, switch.DOMAIN, config, discovery_update
)
async def test_discovery_device_remove(hass, mqtt_mock, setup_tasmota):
"""Test device registry remove."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 1
unique_id = f"{DEFAULT_CONFIG['mac']}_switch_relay_0"
await help_test_discovery_device_remove(
hass, mqtt_mock, switch.DOMAIN, unique_id, config
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock, setup_tasmota):
"""Test MQTT subscriptions are managed when entity_id is updated."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 1
topics = [
get_topic_stat_result(config),
get_topic_tele_state(config),
get_topic_tele_will(config),
]
await help_test_entity_id_update_subscriptions(
hass, mqtt_mock, switch.DOMAIN, config, topics
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock, setup_tasmota):
"""Test MQTT discovery update when entity_id is updated."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 1
await help_test_entity_id_update_discovery_update(
hass, mqtt_mock, switch.DOMAIN, config
)
|
{
"content_hash": "abb066cd5b736165df0e6e8bf35b887f",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 88,
"avg_line_length": 32.67886178861789,
"alnum_prop": 0.67595472073641,
"repo_name": "FreekingDean/home-assistant",
"id": "00b0a922e0a052e71ad54ff17b9ae70ed2a9b080",
"size": "8039",
"binary": false,
"copies": "10",
"ref": "refs/heads/dev",
"path": "tests/components/tasmota/test_switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2335"
},
{
"name": "Python",
"bytes": "36746639"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
from .base import BaseReporter
class InformationReporter(BaseReporter):
title = 'Information'
def run(self, error):
return self.from_operator('information', None)
|
{
"content_hash": "1018713c0dd6f1ddb7c617250064cc4e",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 54,
"avg_line_length": 20.333333333333332,
"alnum_prop": 0.7103825136612022,
"repo_name": "grappa-py/grappa",
"id": "7391852b3be28816cbc5abcca4d1ec8597f1300b",
"size": "207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grappa/reporters/information.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1520"
},
{
"name": "Python",
"bytes": "144569"
}
],
"symlink_target": ""
}
|
"""
conpaas.services.taskfarm.manager.manager
=========================================
ConPaaS TaskFarm manager.
:copyright: (C) 2010-2013 by Contrail Consortium.
"""
from threading import Thread
from conpaas.core.expose import expose
from conpaas.core.manager import BaseManager
from conpaas.core.https.server import HttpJsonResponse, HttpErrorResponse
from conpaas.services.taskfarm.agent import client
# import node_info
class TaskFarmManager(BaseManager):
"""Manager class with the following exposed methods:
startup() -- POST
shutdown() -- POST
add_nodes(count) -- POST
remove_nodes(count) -- POST
list_nodes() -- GET
get_service_info() -- GET
get_node_info(serviceNodeId) -- GET
"""
# Manager states
S_INIT = 'INIT' # manager initialized but not yet started
S_PROLOGUE = 'PROLOGUE' # manager is starting up
S_RUNNING = 'RUNNING' # manager is running
S_ADAPTING = 'ADAPTING' # manager is in a transient state - frontend will
# keep polling until manager out of transient state
S_EPILOGUE = 'EPILOGUE' # manager is shutting down
S_STOPPED = 'STOPPED' # manager stopped
S_ERROR = 'ERROR' # manager is in error state
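# Typical lifecycle (illustrative): INIT -> PROLOGUE -> RUNNING, with
# RUNNING <-> ADAPTING while nodes are added or removed, and finally
# RUNNING -> EPILOGUE -> STOPPED; a failed transition ends in ERROR.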
# String template for error messages returned when performing actions in
# the wrong state
WRONG_STATE_MSG = "ERROR: cannot perform %(action)s in state %(curstate)s"
# String template for error messages returned when a required argument is
# missing
REQUIRED_ARG_MSG = "ERROR: %(arg)s is a required argument"
# String template for debugging messages logged on nodes creation
ACTION_REQUESTING_NODES = "requesting %(count)s nodes in %(action)s"
AGENT_PORT = 5555
def __init__(self, config_parser, **kwargs):
"""Initialize a TaskFarm Manager.
'config_parser' represents the manager config file.
**kwargs holds anything that can't be sent in config_parser."""
BaseManager.__init__(self, config_parser)
self.nodes = []
# Setup the clouds' controller
self.controller.generate_context('taskfarm')
self.state = self.S_INIT
self.hub_ip = None
@expose('POST')
def startup(self, kwargs):
"""Start the TaskFarm service"""
self.logger.info('Manager starting up')
# Starting up the service makes sense only in the INIT or STOPPED
# states
if self.state != self.S_INIT and self.state != self.S_STOPPED:
vals = { 'curstate': self.state, 'action': 'startup' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
self.state = self.S_PROLOGUE
Thread(target=self._do_startup, kwargs=kwargs).start()
return HttpJsonResponse({ 'state': self.state })
def _do_startup(self, cloud):
"""Start up the service. The first node will be an agent running a
TaskFarm Hub and a TaskFarm Node."""
startCloud = self._init_cloud(cloud)
vals = { 'action': '_do_startup', 'count': 1 }
self.logger.debug(self.ACTION_REQUESTING_NODES % vals)
try:
nodes = self.controller.create_nodes(1,
client.check_agent_process, self.AGENT_PORT, startCloud)
hub_node = nodes[0]
# The first agent is a TaskFarm Hub and a TaskFarm Node
client.create_hub(hub_node.ip, self.AGENT_PORT)
client.create_node(hub_node.ip, self.AGENT_PORT, hub_node.ip)
self.logger.info("Added node %s: %s " % (hub_node.id, hub_node.ip))
# node_info.add_node_info('/etc/hosts', hub_node.ip, hub_node.id)
self.hub_ip = hub_node.ip
# Extend the nodes list with the newly created one
self.nodes += nodes
self.state = self.S_RUNNING
except Exception, err:
self.logger.exception('_do_startup: Failed to create hub: %s' % err)
self.state = self.S_ERROR
@expose('POST')
def shutdown(self, kwargs):
"""Switch to EPILOGUE and call a thread to delete all nodes"""
# Shutdown only if RUNNING
if self.state != self.S_RUNNING:
vals = { 'curstate': self.state, 'action': 'shutdown' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
self.state = self.S_EPILOGUE
Thread(target=self._do_shutdown, args=[]).start()
return HttpJsonResponse({ 'state': self.state })
def _do_shutdown(self):
"""Delete all nodes and switch to status STOPPED"""
self.controller.delete_nodes(self.nodes)
self.state = self.S_STOPPED
def __check_count_in_args(self, kwargs):
"""Return 'count' if all is good. HttpErrorResponse otherwise."""
# The frontend sends count under 'node'.
if 'node' in kwargs:
kwargs['count'] = kwargs['node']
if 'count' not in kwargs:
return HttpErrorResponse(self.REQUIRED_ARG_MSG % { 'arg': 'count' })
if not isinstance(kwargs['count'], int):
return HttpErrorResponse(
"ERROR: Expected an integer value for 'count'")
return int(kwargs['count'])
@expose('POST')
def add_nodes(self, kwargs):
"""Add kwargs['count'] nodes to this deployment"""
self.controller.add_context_replacement(dict(STRING='taskfarm'))
# Adding nodes makes sense only in the RUNNING state
if self.state != self.S_RUNNING:
vals = { 'curstate': self.state, 'action': 'add_nodes' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
# Ensure 'count' is valid
count_or_err = self.__check_count_in_args(kwargs)
if isinstance(count_or_err, HttpErrorResponse):
return count_or_err
count = count_or_err
self.state = self.S_ADAPTING
Thread(target=self._do_add_nodes, args=[count, kwargs['cloud']]).start()
return HttpJsonResponse({ 'state': self.state })
def _do_add_nodes(self, count, cloud):
"""Add 'count' TaskFarm Nodes to this deployment"""
startCloud = self._init_cloud(cloud)
vals = { 'action': '_do_add_nodes', 'count': count }
self.logger.debug(self.ACTION_REQUESTING_NODES % vals)
node_instances = self.controller.create_nodes(count,
client.check_agent_process, self.AGENT_PORT, startCloud)
# Startup agents
for node in node_instances:
client.create_node(node.ip, self.AGENT_PORT, self.hub_ip)
self.logger.info("Added node %s: %s " % (node.id, node.ip))
# node_info.add_node_info('/etc/hosts', node.ip, node.id)
self.nodes += node_instances
self.state = self.S_RUNNING
@expose('POST')
def remove_nodes(self, kwargs):
"""Remove kwargs['count'] nodes from this deployment"""
# Removing nodes only if RUNNING
if self.state != self.S_RUNNING:
vals = { 'curstate': self.state, 'action': 'remove_nodes' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
# Ensure 'count' is valid
count_or_err = self.__check_count_in_args(kwargs)
if isinstance(count_or_err, HttpErrorResponse):
return count_or_err
count = count_or_err
if count > len(self.nodes) - 1:
return HttpErrorResponse("ERROR: Cannot remove so many nodes")
self.state = self.S_ADAPTING
Thread(target=self._do_remove_nodes, args=[count]).start()
return HttpJsonResponse({ 'state': self.state })
def _do_remove_nodes(self, count):
"""Remove 'count' nodes, starting from the end of the list. This way
the TaskFarm Hub gets removed last."""
for _ in range(count):
node = self.nodes.pop()
self.logger.info("Removing node with IP %s" % node.ip)
self.controller.delete_nodes([ node ])
# node_info.remove_node_info('/etc/hosts', node.ip)
self.state = self.S_RUNNING
def __is_hub(self, node):
"""Return True if the given node is the TaskFarm Hub"""
return node.ip == self.hub_ip
@expose('GET')
def list_nodes(self, kwargs):
"""Return a list of running nodes"""
if self.state != self.S_RUNNING:
vals = { 'curstate': self.state, 'action': 'list_nodes' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
taskfarm_nodes = [
node.id for node in self.nodes if not self.__is_hub(node)
]
taskfarm_hub = [
node.id for node in self.nodes if self.__is_hub(node)
]
return HttpJsonResponse({
'hub': taskfarm_hub,
'node': taskfarm_nodes
})
@expose('GET')
def get_service_info(self, kwargs):
"""Return the service state and type"""
return HttpJsonResponse({'state': self.state, 'type': 'taskfarm'})
@expose('GET')
def get_node_info(self, kwargs):
"""Return information about the node identified by the given
kwargs['serviceNodeId']"""
# serviceNodeId is a required parameter
if 'serviceNodeId' not in kwargs:
vals = { 'arg': 'serviceNodeId' }
return HttpErrorResponse(self.REQUIRED_ARG_MSG % vals)
serviceNodeId = kwargs.pop('serviceNodeId')
serviceNode = None
for node in self.nodes:
if serviceNodeId == node.id:
serviceNode = node
break
if serviceNode is None:
return HttpErrorResponse(
'ERROR: Cannot find node with serviceNode=%s' % serviceNodeId)
return HttpJsonResponse({
'serviceNode': {
'id': serviceNode.id,
'ip': serviceNode.ip,
'is_hub': self.__is_hub(serviceNode)
}
})
|
{
"content_hash": "78d8f9c8cf4f61b7537ff5e035ebdb50",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 80,
"avg_line_length": 35.90252707581227,
"alnum_prop": 0.5983911513323278,
"repo_name": "ema/conpaas",
"id": "5cecb3b2c79b3542fc105a085d68470a57ac88f4",
"size": "9970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conpaas-services/src/conpaas/services/taskfarm/manager/manager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "59192"
},
{
"name": "Java",
"bytes": "399657"
},
{
"name": "JavaScript",
"bytes": "108113"
},
{
"name": "PHP",
"bytes": "1824901"
},
{
"name": "Python",
"bytes": "2405080"
},
{
"name": "Shell",
"bytes": "157790"
}
],
"symlink_target": ""
}
|
import os
import sys
from ctk_cli import CLIArgumentParser
# Append ITK libs
sys.path.append(os.path.join(os.environ['ITK_BUILD_DIR'],
'Wrapping/Generators/Python'))
sys.path.append(os.path.join(os.environ['ITK_BUILD_DIR'], 'lib'))
# Append TubeTK libs
sys.path.append(os.path.join(os.environ['TUBETK_BUILD_DIR'],
'TubeTK-build/lib/TubeTK'))
import itk
from itk import TubeTKITK as itktube
def main(args):
PixelType = itk.UC
Dimension = 3
# Read tre file
TubeFileReaderType = itk.SpatialObjectReader[Dimension]
tubeFileReader = TubeFileReaderType.New()
tubeFileReader.SetFileName(args.inputTubeFile)
tubeFileReader.Update()
# Read template image
TemplateImageType = itk.Image[PixelType, Dimension]
TemplateImageReaderType = itk.ImageFileReader[TemplateImageType]
templateImageReader = TemplateImageReaderType.New()
templateImageReader.SetFileName(args.inputTemplateImage)
templateImageReader.Update()
# call ConvertTubesToImage
TubesToImageFilterType = itktube.ConvertTubesToImage[Dimension, PixelType]
tubesToImageFilter = TubesToImageFilterType.New()
tubesToImageFilter.SetUseRadius(args.useRadii)
tubesToImageFilter.SetTemplateImage(templateImageReader.GetOutput())
tubesToImageFilter.SetInput(tubeFileReader.GetOutput())
# write output image
TubeImageWriterType = itk.ImageFileWriter[TemplateImageType]
tubeImageWriter = TubeImageWriterType.New()
tubeImageWriter.SetInput(tubesToImageFilter.GetOutput())
tubeImageWriter.SetFileName(args.outputImageFile)
tubeImageWriter.Update()
if __name__ == "__main__":
main(CLIArgumentParser().parse_args())
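# Illustrative invocation (a sketch; the exact flag spellings come from the
# accompanying CTK CLI XML description and are assumed here):
#   python ConvertTubesToImage.py --useRadii \
#       --inputTubeFile tubes.tre --inputTemplateImage template.mha \
#       --outputImageFile tubes.mha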
|
{
"content_hash": "dd42d686670968583f793d288bfc91ed",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 76,
"avg_line_length": 31.384615384615383,
"alnum_prop": 0.7769607843137255,
"repo_name": "cdeepakroy/TubeTK",
"id": "323e02061c83ade50e5da352bf79f6a2035cf027",
"size": "1632",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Applications/ConvertTubesToImage/ConvertTubesToImage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9149"
},
{
"name": "C++",
"bytes": "3920935"
},
{
"name": "CMake",
"bytes": "625504"
},
{
"name": "CSS",
"bytes": "17428"
},
{
"name": "Python",
"bytes": "209126"
},
{
"name": "Shell",
"bytes": "37371"
},
{
"name": "XSLT",
"bytes": "8636"
}
],
"symlink_target": ""
}
|
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Derived from Cursor3D. This script increases the coverage of the
# vtkImageInPlaceFilter superclass.
# global values
CURSOR_X = 20
CURSOR_Y = 20
CURSOR_Z = 20
IMAGE_MAG_X = 2
IMAGE_MAG_Y = 2
IMAGE_MAG_Z = 1
# pipeline stuff
reader = vtk.vtkSLCReader()
reader.SetFileName(VTK_DATA_ROOT + "/Data/nut.slc")
# make the image a little bigger
magnify1 = vtk.vtkImageMagnify()
magnify1.SetInputConnection(reader.GetOutputPort())
magnify1.SetMagnificationFactors(IMAGE_MAG_X, IMAGE_MAG_Y, IMAGE_MAG_Z)
magnify1.ReleaseDataFlagOn()
magnify2 = vtk.vtkImageMagnify()
magnify2.SetInputConnection(reader.GetOutputPort())
magnify2.SetMagnificationFactors(IMAGE_MAG_X, IMAGE_MAG_Y, IMAGE_MAG_Z)
magnify2.ReleaseDataFlagOn()
# a filter that does in place processing (magnify ReleaseDataFlagOn)
cursor = vtk.vtkImageCursor3D()
cursor.SetInputConnection(magnify1.GetOutputPort())
cursor.SetCursorPosition(CURSOR_X * IMAGE_MAG_X,
CURSOR_Y * IMAGE_MAG_Y,
CURSOR_Z * IMAGE_MAG_Z)
cursor.SetCursorValue(255)
cursor.SetCursorRadius(50 * IMAGE_MAG_X)
# stream to increase coverage of in place filter.
# put the two together in one image
imageAppend = vtk.vtkImageAppend()
imageAppend.SetAppendAxis(0)
imageAppend.AddInputConnection(magnify2.GetOutputPort())
imageAppend.AddInputConnection(cursor.GetOutputPort())
viewer = vtk.vtkImageViewer()
viewer.SetInputConnection(imageAppend.GetOutputPort())
viewer.SetZSlice(CURSOR_Z * IMAGE_MAG_Z)
viewer.SetColorWindow(200)
viewer.SetColorLevel(80)
# viewer DebugOn
viewer.Render()
viewer.SetPosition(50, 50)
# make interface
viewer.Render()
|
{
"content_hash": "6a89f615988d50161138eece2d51afaa",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 71,
"avg_line_length": 31.017857142857142,
"alnum_prop": 0.770293609671848,
"repo_name": "msmolens/VTK",
"id": "e532162e084c22f8a947b7782511955981945fa6",
"size": "1759",
"binary": false,
"copies": "20",
"ref": "refs/heads/slicer-v6.3.0-2015-07-21-426987d",
"path": "Imaging/Core/Testing/Python/TestInPlaceFilter.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "37444"
},
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "46630728"
},
{
"name": "C++",
"bytes": "69146638"
},
{
"name": "CMake",
"bytes": "1662070"
},
{
"name": "CSS",
"bytes": "186729"
},
{
"name": "Cuda",
"bytes": "29062"
},
{
"name": "D",
"bytes": "2081"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "GLSL",
"bytes": "205024"
},
{
"name": "Groff",
"bytes": "65394"
},
{
"name": "HTML",
"bytes": "292104"
},
{
"name": "Java",
"bytes": "196895"
},
{
"name": "JavaScript",
"bytes": "1130278"
},
{
"name": "Lex",
"bytes": "45258"
},
{
"name": "Objective-C",
"bytes": "33460"
},
{
"name": "Objective-C++",
"bytes": "245313"
},
{
"name": "Pascal",
"bytes": "3255"
},
{
"name": "Perl",
"bytes": "173168"
},
{
"name": "Prolog",
"bytes": "4406"
},
{
"name": "Python",
"bytes": "16119638"
},
{
"name": "Shell",
"bytes": "75748"
},
{
"name": "Slash",
"bytes": "1476"
},
{
"name": "Smarty",
"bytes": "1325"
},
{
"name": "Tcl",
"bytes": "1845956"
},
{
"name": "Yacc",
"bytes": "174481"
}
],
"symlink_target": ""
}
|
import os
import re
from sdkconfig import SDKConfig
from pyparsing import OneOrMore
from pyparsing import restOfLine
from pyparsing import alphanums
from pyparsing import Word
from pyparsing import alphas
from pyparsing import ParseFatalException
from pyparsing import Suppress
from pyparsing import Group
from pyparsing import Literal
from pyparsing import ZeroOrMore
from pyparsing import Optional
from pyparsing import originalTextFor
from pyparsing import Forward
from pyparsing import indentedBlock
from collections import namedtuple
import abc
KeyGrammar = namedtuple("KeyGrammar", "grammar min max required")
class FragmentFile():
"""
Fragment file internal representation. Parses and stores instances of the fragment definitions
contained within the file.
"""
def __init__(self, fragment_file, sdkconfig):
try:
fragment_file = open(fragment_file, "r")
except TypeError:
# fragment_file is already an open file object
pass
path = os.path.realpath(fragment_file.name)
indent_stack = [1]
class parse_ctx:
fragment = None # current fragment
key = "" # current key
keys = list() # list of keys parsed
key_grammar = None # current key grammar
@staticmethod
def reset():
parse_ctx.fragment = None
parse_ctx.key = ""
parse_ctx.keys = list()
parse_ctx.key_grammar = None
def fragment_type_parse_action(toks):
parse_ctx.reset()
parse_ctx.fragment = FRAGMENT_TYPES[toks[0]]() # create instance of the fragment
return None
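# Recursively flattens parse results into a statement list: plain values are
# appended directly, while conditional blocks are expanded by evaluating
# each if/elif condition against sdkconfig until one matches (a bare else
# branch has no condition and is expanded unconditionally).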
def expand_conditionals(toks, stmts):
try:
stmt = toks["value"]
stmts.append(stmt)
except KeyError:
try:
conditions = toks["conditional"]
for condition in conditions:
try:
_toks = condition[1]
_cond = condition[0]
if sdkconfig.evaluate_expression(_cond):
expand_conditionals(_toks, stmts)
break
except IndexError:
expand_conditionals(condition[0], stmts)
except KeyError:
for tok in toks:
expand_conditionals(tok, stmts)
def key_body_parsed(pstr, loc, toks):
stmts = list()
expand_conditionals(toks, stmts)
if parse_ctx.key_grammar.min and len(stmts) < parse_ctx.key_grammar.min:
raise ParseFatalException(pstr, loc, "fragment requires at least %d values for key '%s'" %
(parse_ctx.key_grammar.min, parse_ctx.key))
if parse_ctx.key_grammar.max and len(stmts) > parse_ctx.key_grammar.max:
raise ParseFatalException(pstr, loc, "fragment requires at most %d values for key '%s'" %
(parse_ctx.key_grammar.max, parse_ctx.key))
try:
parse_ctx.fragment.set_key_value(parse_ctx.key, stmts)
except Exception as e:
raise ParseFatalException(pstr, loc, "unable to add key '%s'; %s" % (parse_ctx.key, e.message))
return None
key = Word(alphanums + "_") + Suppress(":")
key_stmt = Forward()
condition_block = indentedBlock(key_stmt, indent_stack)
key_stmts = OneOrMore(condition_block)
key_body = Suppress(key) + key_stmts
key_body.setParseAction(key_body_parsed)
condition = originalTextFor(SDKConfig.get_expression_grammar()).setResultsName("condition")
if_condition = Group(Suppress("if") + condition + Suppress(":") + condition_block)
elif_condition = Group(Suppress("elif") + condition + Suppress(":") + condition_block)
else_condition = Group(Suppress("else") + Suppress(":") + condition_block)
conditional = (if_condition + Optional(OneOrMore(elif_condition)) + Optional(else_condition)).setResultsName("conditional")
def key_parse_action(pstr, loc, toks):
key = toks[0]
if key in parse_ctx.keys:
raise ParseFatalException(pstr, loc, "duplicate key '%s' value definition" % parse_ctx.key)
parse_ctx.key = key
parse_ctx.keys.append(key)
try:
parse_ctx.key_grammar = parse_ctx.fragment.get_key_grammars()[key]
key_grammar = parse_ctx.key_grammar.grammar
except KeyError:
raise ParseFatalException(pstr, loc, "key '%s' is not supported by fragment" % key)
except Exception as e:
raise ParseFatalException(pstr, loc, "unable to parse key '%s'; %s" % (key, e.message))
key_stmt << (conditional | Group(key_grammar).setResultsName("value"))
return None
def name_parse_action(pstr, loc, toks):
parse_ctx.fragment.name = toks[0]
key.setParseAction(key_parse_action)
ftype = Word(alphas).setParseAction(fragment_type_parse_action)
fid = Suppress(":") + Word(alphanums + "_.").setResultsName("name")
fid.setParseAction(name_parse_action)
header = Suppress("[") + ftype + fid + Suppress("]")
def fragment_parse_action(pstr, loc, toks):
key_grammars = parse_ctx.fragment.get_key_grammars()
required_keys = set([k for (k,v) in key_grammars.items() if v.required])
present_keys = required_keys.intersection(set(parse_ctx.keys))
if present_keys != required_keys:
raise ParseFatalException(pstr, loc, "required keys %s for fragment not found" %
list(required_keys - present_keys))
return parse_ctx.fragment
fragment_stmt = Forward()
fragment_block = indentedBlock(fragment_stmt, indent_stack)
fragment_if_condition = Group(Suppress("if") + condition + Suppress(":") + fragment_block)
fragment_elif_condition = Group(Suppress("elif") + condition + Suppress(":") + fragment_block)
fragment_else_condition = Group(Suppress("else") + Suppress(":") + fragment_block)
fragment_conditional = (fragment_if_condition + Optional(OneOrMore(fragment_elif_condition)) +
Optional(fragment_else_condition)).setResultsName("conditional")
fragment = (header + OneOrMore(indentedBlock(key_body, indent_stack, False))).setResultsName("value")
fragment.setParseAction(fragment_parse_action)
fragment.ignore("#" + restOfLine)
deprecated_mapping = DeprecatedMapping.get_fragment_grammar(sdkconfig, fragment_file.name).setResultsName("value")
fragment_stmt << (Group(deprecated_mapping) | Group(fragment) | Group(fragment_conditional))
def fragment_stmt_parsed(pstr, loc, toks):
stmts = list()
expand_conditionals(toks, stmts)
return stmts
parser = ZeroOrMore(fragment_stmt)
parser.setParseAction(fragment_stmt_parsed)
self.fragments = parser.parseFile(fragment_file, parseAll=True)
for fragment in self.fragments:
fragment.path = path
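# Illustrative sketch of the syntax accepted by the grammar built above: a '[type:name]'
# header followed by 'key:' bodies whose indented values may be wrapped in if/elif/else
# blocks over sdkconfig expressions. The archive, object and config names below are
# hypothetical placeholders.
#
#   [sections:text]
#   entries:
#       .text+
#       .literal+
#
#   [mapping:flash]
#   archive: libfoo.a
#   entries:
#       if SOME_CONFIG_OPTION:
#           obj1 (noflash)
#       else:
#           * (default)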
class Fragment():
"""
Encapsulates a fragment as defined in the generator syntax. Sets values common to all fragments and performs processing
such as checking the validity of the fragment name and getting the entry values.
"""
__metaclass__ = abc.ABCMeta
IDENTIFIER = Word(alphas + "_", alphanums + "_")
ENTITY = Word(alphanums + ".-_$")
@abc.abstractmethod
def set_key_value(self, key, parse_results):
pass
@abc.abstractmethod
def get_key_grammars(self):
pass
class Sections(Fragment):
grammars = {
"entries": KeyGrammar(Word(alphanums + "+.").setResultsName("section"), 1, None, True)
}
"""
Utility function that returns a list of sections given a sections fragment entry,
with the '+' notation and symbol concatenation handled automatically.
"""
@staticmethod
def get_section_data_from_entry(sections_entry, symbol=None):
if not symbol:
sections = list()
sections.append(sections_entry.replace("+", ""))
sections.append(sections_entry.replace("+", ".*"))
return sections
else:
if sections_entry.endswith("+"):
section = sections_entry.replace("+", ".*")
expansion = section.replace(".*", "." + symbol)
return (section, expansion)
else:
return (sections_entry, None)
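# Illustrative expansions derived from the logic above; the section and symbol names
# are hypothetical placeholders:
#   get_section_data_from_entry(".iram1+")               -> [".iram1", ".iram1.*"]
#   get_section_data_from_entry(".iram1+", symbol="foo") -> (".iram1.*", ".iram1.foo")
#   get_section_data_from_entry(".dram1", symbol="foo")  -> (".dram1", None)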
def set_key_value(self, key, parse_results):
if key == "entries":
self.entries = set()
for result in parse_results:
self.entries.add(result["section"])
def get_key_grammars(self):
return self.__class__.grammars
class Scheme(Fragment):
"""
Encapsulates a scheme fragment, which defines what target input sections are placed under.
"""
grammars = {
"entries": KeyGrammar(Fragment.IDENTIFIER.setResultsName("sections") + Suppress("->") +
Fragment.IDENTIFIER.setResultsName("target"), 1, None, True)
}
def set_key_value(self, key, parse_results):
if key == "entries":
self.entries = set()
for result in parse_results:
self.entries.add((result["sections"], result["target"]))
def get_key_grammars(self):
return self.__class__.grammars
class Mapping(Fragment):
"""
Encapsulates a mapping fragment, which defines what targets the input sections of mappable entities are placed under.
"""
MAPPING_ALL_OBJECTS = "*"
def __init__(self):
Fragment.__init__(self)
self.entries = set()
def set_key_value(self, key, parse_results):
if key == "archive":
self.archive = parse_results[0]["archive"]
elif key == "entries":
for result in parse_results:
obj = None
symbol = None
scheme = None
try:
obj = result["object"]
except KeyError:
pass
try:
symbol = result["symbol"]
except KeyError:
pass
try:
scheme = result["scheme"]
except KeyError:
pass
self.entries.add((obj, symbol, scheme))
def get_key_grammars(self):
# There are three possible patterns for mapping entries:
# obj:symbol (scheme)
# obj (scheme)
# * (scheme)
obj = Fragment.ENTITY.setResultsName("object")
symbol = Suppress(":") + Fragment.IDENTIFIER.setResultsName("symbol")
scheme = Suppress("(") + Fragment.IDENTIFIER.setResultsName("scheme") + Suppress(")")
pattern1 = obj + symbol + scheme
pattern2 = obj + scheme
pattern3 = Literal(Mapping.MAPPING_ALL_OBJECTS).setResultsName("object") + scheme
entry = pattern1 | pattern2 | pattern3
grammars = {
"archive": KeyGrammar(Fragment.ENTITY.setResultsName("archive"), 1, 1, True),
"entries": KeyGrammar(entry, 0, None, True)
}
return grammars
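# Illustrative examples of entries accepted by the grammar above and the
# (obj, symbol, scheme) tuples that set_key_value() builds from them; the object,
# symbol and scheme names are hypothetical placeholders:
#   croutine:prvCheckPendingReadyList (noflash) -> ("croutine", "prvCheckPendingReadyList", "noflash")
#   croutine (noflash)                          -> ("croutine", None, "noflash")
#   * (default)                                 -> ("*", None, "default")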
class DeprecatedMapping():
"""
Encapsulates a deprecated old-style mapping fragment, which defines what targets the input sections of mappable entities are placed under.
"""
# Name of the default condition entry
DEFAULT_CONDITION = "default"
MAPPING_ALL_OBJECTS = "*"
@staticmethod
def get_fragment_grammar(sdkconfig, fragment_file):
# Match header [mapping]
header = Suppress("[") + Suppress("mapping") + Suppress("]")
# There are three possible patterns for mapping entries:
# obj:symbol (scheme)
# obj (scheme)
# * (scheme)
obj = Fragment.ENTITY.setResultsName("object")
symbol = Suppress(":") + Fragment.IDENTIFIER.setResultsName("symbol")
scheme = Suppress("(") + Fragment.IDENTIFIER.setResultsName("scheme") + Suppress(")")
pattern1 = Group(obj + symbol + scheme)
pattern2 = Group(obj + scheme)
pattern3 = Group(Literal(Mapping.MAPPING_ALL_OBJECTS).setResultsName("object") + scheme)
mapping_entry = pattern1 | pattern2 | pattern3
# To simplify parsing, classify groups of condition-mapping entries into two types: normal and default.
# A normal grouping is one with a non-default condition; the default grouping is the one that contains
# the default condition.
mapping_entries = Group(ZeroOrMore(mapping_entry)).setResultsName("mappings")
normal_condition = Suppress(":") + originalTextFor(SDKConfig.get_expression_grammar())
default_condition = Optional(Suppress(":") + Literal(DeprecatedMapping.DEFAULT_CONDITION))
normal_group = Group(normal_condition.setResultsName("condition") + mapping_entries)
default_group = Group(default_condition + mapping_entries).setResultsName("default_group")
normal_groups = Group(ZeroOrMore(normal_group)).setResultsName("normal_groups")
# Any mapping fragment definition can have zero or more normal groups and only one default group as the last entry.
archive = Suppress("archive") + Suppress(":") + Fragment.ENTITY.setResultsName("archive")
entries = Suppress("entries") + Suppress(":") + (normal_groups + default_group).setResultsName("entries")
mapping = Group(header + archive + entries)
mapping.ignore("#" + restOfLine)
def parsed_deprecated_mapping(pstr, loc, toks):
fragment = Mapping()
fragment.archive = toks[0].archive
fragment.name = re.sub(r"[^0-9a-zA-Z]+", "_", fragment.archive)
fragment.entries = set()
condition_true = False
for entries in toks[0].entries[0]:
condition = next(iter(entries.condition.asList())).strip()
condition_val = sdkconfig.evaluate_expression(condition)
if condition_val:
for entry in entries[1]:
fragment.entries.add((entry.object, None if entry.symbol == '' else entry.symbol, entry.scheme))
condition_true = True
break
if not fragment.entries and not condition_true:
try:
entries = toks[0].entries[1][1]
except IndexError:
entries = toks[0].entries[1][0]
for entry in entries:
fragment.entries.add((entry.object, None if entry.symbol == '' else entry.symbol, entry.scheme))
if not fragment.entries:
fragment.entries.add(("*", None, "default"))
dep_warning = str(ParseFatalException(pstr, loc,
"Warning: Deprecated old-style mapping fragment parsed in file %s." % fragment_file))
print(dep_warning)
return fragment
mapping.setParseAction(parsed_deprecated_mapping)
return mapping
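# Illustrative sketch of the deprecated old-style syntax this grammar accepts: an
# 'archive' key, then 'entries' grouped under ': <condition>' lines, with an optional
# ': default' group last. The archive, object and condition names are hypothetical
# placeholders.
#
#   [mapping]
#   archive: libfoo.a
#   entries:
#       : SOME_CONFIG_OPTION
#       obj1 (noflash)
#       : default
#       * (default)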
FRAGMENT_TYPES = {
"sections": Sections,
"scheme": Scheme,
"mapping": Mapping
}
|
{
"content_hash": "b594fcb11fcf8951afcbfebf1e65ab53",
"timestamp": "",
"source": "github",
"line_count": 408,
"max_line_length": 131,
"avg_line_length": 38.11029411764706,
"alnum_prop": 0.5903916650588462,
"repo_name": "krzychb/rtd-test-bed",
"id": "dd8295f8692abfe8e39a0394193daa38604859bd",
"size": "16158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/ldgen/fragments.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "248929"
},
{
"name": "Batchfile",
"bytes": "9428"
},
{
"name": "C",
"bytes": "42611901"
},
{
"name": "C++",
"bytes": "10437923"
},
{
"name": "CMake",
"bytes": "316611"
},
{
"name": "CSS",
"bytes": "1340"
},
{
"name": "Dockerfile",
"bytes": "4319"
},
{
"name": "GDB",
"bytes": "2764"
},
{
"name": "Go",
"bytes": "146670"
},
{
"name": "HCL",
"bytes": "468"
},
{
"name": "HTML",
"bytes": "115431"
},
{
"name": "Inno Setup",
"bytes": "14977"
},
{
"name": "Lex",
"bytes": "7273"
},
{
"name": "M4",
"bytes": "189150"
},
{
"name": "Makefile",
"bytes": "439631"
},
{
"name": "Objective-C",
"bytes": "133538"
},
{
"name": "PHP",
"bytes": "498"
},
{
"name": "Pawn",
"bytes": "151052"
},
{
"name": "Perl",
"bytes": "141532"
},
{
"name": "Python",
"bytes": "1868534"
},
{
"name": "Roff",
"bytes": "102712"
},
{
"name": "Ruby",
"bytes": "206821"
},
{
"name": "Shell",
"bytes": "625528"
},
{
"name": "Smarty",
"bytes": "5972"
},
{
"name": "Tcl",
"bytes": "110"
},
{
"name": "TeX",
"bytes": "1961"
},
{
"name": "Visual Basic",
"bytes": "294"
},
{
"name": "XSLT",
"bytes": "80335"
},
{
"name": "Yacc",
"bytes": "15875"
}
],
"symlink_target": ""
}
|
import sys, os
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../')
import pytest
from s3streamer import S3Streamer
def cat_read(stream):
reads = []
while True:
d = stream.read(1000)
if not d:
break
reads.append(d)
return b"".join(reads)
def test_incorrect_parameters(s3fakeconn):
with pytest.raises(TypeError):
S3Streamer('bucket', 'stuff',
s3_connection=s3fakeconn,  # just make sure no real connection is made
incorrect_key="Test")
def test_single_key(s3fakeconn, keydatawithnewline):
stream = S3Streamer('bucket', 'stuff', s3_connection=s3fakeconn)
d_cat = cat_read(stream).decode('utf8')
print("DCAT", d_cat.__repr__())
print("DATA", keydatawithnewline[0].__repr__())
assert d_cat == keydatawithnewline[0]
def test_prefix(s3fakeconn, keydatacated):
stream = S3Streamer('bucket', 'stuff', s3_connection=s3fakeconn, key_is_prefix=True)
d_cat = cat_read(stream).decode('utf8')
print("DCAT", d_cat.__repr__())
print("DATA", keydatacated.__repr__())
assert d_cat == keydatacated
def test_readline(s3fakeconn, keydatacatedsplitonnewline):
stream = S3Streamer('bucket', 'stuff', s3_connection=s3fakeconn, key_is_prefix=True)
for l in keydatacatedsplitonnewline:
d = stream.readline().decode('utf8')
print("READLINE:", d.__repr__())
print("CORRECTL:", l.__repr__())
assert d == l
def test_iter(s3fakeconn, keydatacatedsplitonnewline):
stream = S3Streamer('bucket', 'stuff', s3_connection=s3fakeconn, key_is_prefix=True)
d = [b.decode('utf8') for b in stream]
print("READLINE:", d.__repr__())
print("CORRECTL:", keydatacatedsplitonnewline.__repr__())
assert d == keydatacatedsplitonnewline
|
{
"content_hash": "66c3a23f058a883219a0846bda2f6f65",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 88,
"avg_line_length": 34.18867924528302,
"alnum_prop": 0.6440397350993378,
"repo_name": "diamondman/pys3streamer",
"id": "d83a4d720d58c49d301e147c5f011000924e07f5",
"size": "1812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_reading.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10348"
}
],
"symlink_target": ""
}
|
from direct.distributed.ClockDelta import *
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.task import Task
from otp.ai.AIBase import *
from toontown.building import DistributedElevatorExtAI
from toontown.building.ElevatorConstants import *
from toontown.toonbase import ToontownGlobals
class DistributedLawOfficeElevatorExtAI(DistributedElevatorExtAI.DistributedElevatorExtAI):
def __init__(self, air, bldg, lawOfficeId, entranceId, antiShuffle = 0, minLaff = 0):
DistributedElevatorExtAI.DistributedElevatorExtAI.__init__(self, air, bldg, antiShuffle = antiShuffle, minLaff = minLaff)
self.lawOfficeId = lawOfficeId
self.entranceId = entranceId
def getEntranceId(self):
return self.entranceId
def elevatorClosed(self):
numPlayers = self.countFullSeats()
if numPlayers > 0:
players = []
for i in self.seats:
if i not in [
None,
0]:
players.append(i)
continue
lawOfficeZone = self.bldg.createLawOffice(self.lawOfficeId, self.entranceId, players)
for seatIndex in xrange(len(self.seats)):
avId = self.seats[seatIndex]
if avId:
self.sendUpdateToAvatarId(avId, 'setLawOfficeInteriorZone', [lawOfficeZone])
self.clearFullNow(seatIndex)
else:
self.notify.warning('The elevator left, but was empty.')
self.fsm.request('closed')
def enterClosed(self):
DistributedElevatorExtAI.DistributedElevatorExtAI.enterClosed(self)
self.fsm.request('opening')
def sendAvatarsToDestination(self, avIdList):
if len(avIdList) > 0:
officeZone = self.bldg.createLawOffice(self.lawOfficeId, self.entranceId, avIdList)
for avId in avIdList:
if avId:
self.sendUpdateToAvatarId(avId, 'setLawOfficeInteriorZoneForce', [officeZone])
|
{
"content_hash": "b81108bb1015ff8790ed3f58db1f3190",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 129,
"avg_line_length": 41.48979591836735,
"alnum_prop": 0.6522380718150517,
"repo_name": "linktlh/Toontown-journey",
"id": "15c250145968d7881c481c3f01f68d599033aa8e",
"size": "2033",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "toontown/coghq/DistributedLawOfficeElevatorExtAI.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
""" Testing data types for ndimage calls
"""
import sys
import numpy as np
from numpy.testing import (assert_array_almost_equal, dec,
assert_array_equal)
from nose.tools import assert_true, assert_equal, assert_raises
from scipy import ndimage
def test_map_coordinates_dts():
# check that ndimage accepts different data types for interpolation
data = np.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
shifted_data = np.array([[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
idx = np.indices(data.shape)
dts = (np.uint8, np.uint16, np.uint32, np.uint64,
np.int8, np.int16, np.int32, np.int64,
np.intp, np.uintp, np.float32, np.float64)
for order in range(0, 6):
for data_dt in dts:
these_data = data.astype(data_dt)
for coord_dt in dts:
# affine mapping
mat = np.eye(2, dtype=coord_dt)
off = np.zeros((2,), dtype=coord_dt)
out = ndimage.affine_transform(these_data, mat, off)
assert_array_almost_equal(these_data, out)
# map coordinates
coords_m1 = idx.astype(coord_dt) - 1
coords_p10 = idx.astype(coord_dt) + 10
out = ndimage.map_coordinates(these_data, coords_m1, order=order)
assert_array_almost_equal(out, shifted_data)
# check constant fill works
out = ndimage.map_coordinates(these_data, coords_p10, order=order)
assert_array_almost_equal(out, np.zeros((3,4)))
# check shift and zoom
out = ndimage.shift(these_data, 1)
assert_array_almost_equal(out, shifted_data)
out = ndimage.zoom(these_data, 1)
assert_array_almost_equal(these_data, out)
@dec.knownfailureif(not sys.platform == 'darwin')
def test_uint64_max():
# Test interpolation respects uint64 max. Reported to fail at least on
# win32 (due to the 32-bit Visual C compiler using signed int64 when
# converting from uint64 to double) and Debian on s390x.
big = 2**64-1
arr = np.array([big, big, big], dtype=np.uint64)
# Tests geometric transform (map_coordinates, affine_transform)
inds = np.indices(arr.shape) - 0.1
x = ndimage.map_coordinates(arr, inds)
assert_true(x[1] > (2**63))
# Tests zoom / shift
x = ndimage.shift(arr, 0.1)
assert_true(x[1] > (2**63))
|
{
"content_hash": "7af654a73becec15fae5e36c02f2f18c",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 82,
"avg_line_length": 40.507936507936506,
"alnum_prop": 0.5693573667711599,
"repo_name": "teoliphant/scipy",
"id": "85027e31d04247b3a53a2d8f5dba535f80002388",
"size": "2552",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scipy/ndimage/tests/test_datatypes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11530901"
},
{
"name": "C++",
"bytes": "7695320"
},
{
"name": "FORTRAN",
"bytes": "5898903"
},
{
"name": "Matlab",
"bytes": "1861"
},
{
"name": "Objective-C",
"bytes": "137083"
},
{
"name": "Python",
"bytes": "5863600"
},
{
"name": "Shell",
"bytes": "1793"
}
],
"symlink_target": ""
}
|
from __future__ import division
import errno
import os
import shutil
import time
import uuid
from collections import namedtuple
from itertools import izip as zip
from itertools import repeat
from cassandra import WriteFailure
from cassandra.concurrent import (execute_concurrent,
execute_concurrent_with_args)
from ccmlib.node import Node
from nose.tools import assert_equal, assert_less_equal
from dtest import Tester, create_ks, debug
from tools.data import rows_to_list
from tools.decorators import since
from tools.files import size_of_files_in_dir
from tools.funcutils import get_rate_limited_function
from tools.hacks import advance_to_next_cl_segment
_16_uuid_column_spec = (
'a uuid PRIMARY KEY, b uuid, c uuid, d uuid, e uuid, f uuid, g uuid, '
'h uuid, i uuid, j uuid, k uuid, l uuid, m uuid, n uuid, o uuid, '
'p uuid'
)
def _insert_rows(session, table_name, insert_stmt, values):
prepared_insert = session.prepare(insert_stmt)
values = list(values) # in case values is a generator
execute_concurrent(session, ((prepared_insert, x) for x in values),
concurrency=500, raise_on_first_error=True)
data_loaded = rows_to_list(session.execute('SELECT * FROM ' + table_name))
debug('{n} rows inserted into {table_name}'.format(n=len(data_loaded), table_name=table_name))
# use assert_equal over assert_length_equal to avoid printing out
# potentially large lists
assert_equal(len(values), len(data_loaded))
return data_loaded
def _move_contents(source_dir, dest_dir, verbose=True):
for source_filename in os.listdir(source_dir):
source_path, dest_path = (os.path.join(source_dir, source_filename),
os.path.join(dest_dir, source_filename))
if verbose:
debug('moving {} to {}'.format(source_path, dest_path))
shutil.move(source_path, dest_path)
def _get_16_uuid_insert_stmt(ks_name, table_name):
return (
'INSERT INTO {ks_name}.{table_name} '
'(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) '
'VALUES (uuid(), uuid(), uuid(), uuid(), uuid(), '
'uuid(), uuid(), uuid(), uuid(), uuid(), uuid(), '
'uuid(), uuid(), uuid(), uuid(), uuid())'
).format(ks_name=ks_name, table_name=table_name)
def _get_create_table_statement(ks_name, table_name, column_spec, options=None):
if options:
options_pairs = ('{k}={v}'.format(k=k, v=v) for (k, v) in options.iteritems())
options_string = 'WITH ' + ' AND '.join(options_pairs)
else:
options_string = ''
return (
'CREATE TABLE ' + ks_name + '.' + table_name + ' '
'(' + column_spec + ') ' + options_string
)
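# Illustrative result; the keyspace, table and column names are hypothetical placeholders:
#   _get_create_table_statement('ks', 'tab', 'a int PRIMARY KEY, b int', {'cdc': 'true'})
#   -> "CREATE TABLE ks.tab (a int PRIMARY KEY, b int) WITH cdc=true"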
def _write_to_cdc_WriteFailure(session, insert_stmt):
prepared = session.prepare(insert_stmt)
start, rows_loaded, error_found = time.time(), 0, False
rate_limited_debug = get_rate_limited_function(debug, 5)
while not error_found:
# We want to fail if inserting data takes too long. Locally this
# takes about 10s, but let's be generous.
assert_less_equal(
(time.time() - start), 600,
"It's taken more than 10 minutes to reach a WriteFailure trying "
'to overrun the space designated for CDC commitlogs. This could '
"be because data isn't being written quickly enough in this "
'environment, or because C* is failing to reject writes when '
'it should.'
)
# If we haven't logged from here in the last 5s, do so.
rate_limited_debug(
' data load step has lasted {s:.2f}s, '
'loaded {r} rows'.format(s=(time.time() - start), r=rows_loaded))
batch_results = list(execute_concurrent(
session,
((prepared, ()) for _ in range(1000)),
concurrency=500,
# Don't propagate errors to the main thread. We expect at least
# one WriteFailure, so we handle it below as part of the
# results received from this method.
raise_on_first_error=False
))
# Here, we track the number of inserted values by getting the
# number of successfully completed statements...
rows_loaded += len([br for br in batch_results if br[0]])
# then, we make sure that the only failures are the expected
# WriteFailures.
assert_equal([],
[result for (success, result) in batch_results
if not success and not isinstance(result, WriteFailure)])
# Finally, if we find a WriteFailure, that means we've inserted all
# the CDC data we can and so we flip error_found to exit the loop.
if any(isinstance(result, WriteFailure) for (_, result) in batch_results):
debug("write failed (presumably because we've overrun "
'designated CDC commitlog space) after '
'loading {r} rows in {s:.2f}s'.format(
r=rows_loaded,
s=time.time() - start))
error_found = True
return rows_loaded
_TableInfoNamedtuple = namedtuple('TableInfoNamedtuple', [
# required
'ks_name', 'table_name', 'column_spec',
# optional
'options', 'insert_stmt',
# derived
'name', 'create_stmt'
])
class TableInfo(_TableInfoNamedtuple):
__slots__ = ()
def __new__(cls, ks_name, table_name, column_spec, options=None, insert_stmt=None):
name = ks_name + '.' + table_name
create_stmt = _get_create_table_statement(ks_name, table_name, column_spec, options)
self = super(TableInfo, cls).__new__(
cls,
# required
ks_name=ks_name, table_name=table_name, column_spec=column_spec,
# optional
options=options, insert_stmt=insert_stmt,
# derived
name=name, create_stmt=create_stmt
)
return self
def _set_cdc_on_table(session, table_name, value, ks_name=None):
"""
Uses <session> to set CDC to <value> on <ks_name>.<table_name>.
"""
table_string = ks_name + '.' + table_name if ks_name else table_name
value_string = 'true' if value else 'false'
stmt = 'ALTER TABLE ' + table_string + ' WITH CDC = ' + value_string
debug(stmt)
session.execute(stmt)
def _get_set_cdc_func(session, ks_name, table_name):
"""
Close over a session, keyspace name, and table name and return a function
that enables CDC on that table if its argument is truthy and disables it
otherwise.
"""
def set_cdc(value):
return _set_cdc_on_table(
session=session,
ks_name=ks_name, table_name=table_name,
value=value
)
return set_cdc
def _get_commitlog_files(node_path):
commitlog_dir = os.path.join(node_path, 'commitlogs')
return {
os.path.join(commitlog_dir, name)
for name in os.listdir(commitlog_dir)
}
def _get_cdc_raw_files(node_path, cdc_raw_dir_name='cdc_raw'):
commitlog_dir = os.path.join(node_path, cdc_raw_dir_name)
return {
os.path.join(commitlog_dir, name)
for name in os.listdir(commitlog_dir)
}
@since('3.8')
class TestCDC(Tester):
"""
@jira_ticket CASSANDRA-8844
Test the correctness of some features of CDC, Change Data Capture, which
provides a view of the commitlog on tables for which it is enabled.
"""
def _create_temp_dir(self, dir_name, verbose=True):
"""
Create a directory that will be deleted when this test class is torn
down.
"""
if verbose:
debug('creating ' + dir_name)
try:
os.mkdir(dir_name)
except OSError as e:
if e.errno != errno.EEXIST:
debug(dir_name + ' already exists. removing and recreating.')
shutil.rmtree(dir_name)
os.mkdir(dir_name)
else:
raise e
def debug_and_rmtree():
shutil.rmtree(dir_name)
debug(dir_name + ' removed')
self.addCleanup(debug_and_rmtree)
def prepare(self, ks_name,
table_name=None, cdc_enabled_table=None,
gc_grace_seconds=None,
column_spec=None,
configuration_overrides=None,
table_id=None):
"""
Create a 1-node cluster, start it, create a keyspace, and if
<table_name>, create a table in that keyspace. If <cdc_enabled_table>,
that table is created with CDC enabled. If <column_spec>, use that
string to specify the schema of the table -- for example, a valid value
is 'a int PRIMARY KEY, b int'. The <configuration_overrides> is
treated as a dict-like object and passed to
self.cluster.set_configuration_options.
"""
config_defaults = {
'cdc_enabled': True,
# we want to be able to generate new segments quickly
'commitlog_segment_size_in_mb': 2,
}
if configuration_overrides is None:
configuration_overrides = {}
self.cluster.populate(1)
self.cluster.set_configuration_options(dict(config_defaults, **configuration_overrides))
self.cluster.start(wait_for_binary_proto=True)
node = self.cluster.nodelist()[0]
session = self.patient_cql_connection(node)
create_ks(session, ks_name, rf=1)
if table_name is not None:
self.assertIsNotNone(cdc_enabled_table, 'if creating a table in prepare, must specify whether or not CDC is enabled on it')
self.assertIsNotNone(column_spec, 'if creating a table in prepare, must specify its schema')
options = {}
if gc_grace_seconds is not None:
options['gc_grace_seconds'] = gc_grace_seconds
if table_id is not None:
options['id'] = table_id
if cdc_enabled_table:
options['cdc'] = 'true'
stmt = _get_create_table_statement(
ks_name, table_name, column_spec,
options=options
)
debug(stmt)
session.execute(stmt)
return node, session
def _assert_cdc_data_readable_on_round_trip(self, start_with_cdc_enabled):
"""
Parameterized test asserting that data written to a table is still
readable after flipping the CDC flag on that table, then flipping it
again. Starts with CDC enabled if start_with_cdc_enabled, otherwise
starts with it disabled.
"""
ks_name, table_name = 'ks', 'tab'
sequence = [True, False, True] if start_with_cdc_enabled else [False, True, False]
start_enabled, alter_path = sequence[0], list(sequence[1:])
node, session = self.prepare(ks_name=ks_name, table_name=table_name,
cdc_enabled_table=start_enabled,
column_spec='a int PRIMARY KEY, b int')
set_cdc = _get_set_cdc_func(session=session, ks_name=ks_name, table_name=table_name)
insert_stmt = session.prepare('INSERT INTO ' + table_name + ' (a, b) VALUES (?, ?)')
data = tuple(zip(list(range(1000)), list(range(1000))))
execute_concurrent_with_args(session, insert_stmt, data)
# We need data to be in commitlogs, not sstables.
self.assertEqual([], list(node.get_sstables(ks_name, table_name)))
for enable in alter_path:
set_cdc(enable)
self.assertItemsEqual(session.execute('SELECT * FROM ' + table_name), data)
def test_cdc_enabled_data_readable_on_round_trip(self):
"""
Test that data is readable after an enabled->disabled->enabled round
trip.
"""
self._assert_cdc_data_readable_on_round_trip(start_with_cdc_enabled=True)
def test_cdc_disabled_data_readable_on_round_trip(self):
"""
Test that data is readable after an disabled->enabled->disabled round
trip.
"""
self._assert_cdc_data_readable_on_round_trip(start_with_cdc_enabled=False)
def test_insertion_and_commitlog_behavior_after_reaching_cdc_total_space(self):
"""
Test that C* behaves correctly when CDC tables have consumed all the
space available to them. In particular: after writing
cdc_total_space_in_mb MB into CDC commitlogs:
- CDC writes are rejected
- non-CDC writes are accepted
- on flush, CDC commitlogs are copied to cdc_raw
- on flush, non-CDC commitlogs are not copied to cdc_raw
This is a lot of behavior to validate in one test, but we do so to
avoid running multiple tests that each write 1MB of data to fill
cdc_total_space_in_mb.
"""
ks_name = 'ks'
full_cdc_table_info = TableInfo(
ks_name=ks_name, table_name='full_cdc_tab',
column_spec=_16_uuid_column_spec,
insert_stmt=_get_16_uuid_insert_stmt(ks_name, 'full_cdc_tab'),
options={'cdc': 'true'}
)
configuration_overrides = {
# Make CDC space as small as possible so we can fill it quickly.
'cdc_total_space_in_mb': 4,
}
node, session = self.prepare(
ks_name=ks_name,
configuration_overrides=configuration_overrides
)
session.execute(full_cdc_table_info.create_stmt)
# Later, we'll also make assertions about the behavior of non-CDC
# tables, so we create one here.
non_cdc_table_info = TableInfo(
ks_name=ks_name, table_name='non_cdc_tab',
column_spec=_16_uuid_column_spec,
insert_stmt=_get_16_uuid_insert_stmt(ks_name, 'non_cdc_tab')
)
session.execute(non_cdc_table_info.create_stmt)
# We'll also make assertions about the behavior of CDC tables when
# other CDC tables have already filled the designated space for CDC
# commitlogs, so we create the second CDC table here.
empty_cdc_table_info = TableInfo(
ks_name=ks_name, table_name='empty_cdc_tab',
column_spec=_16_uuid_column_spec,
insert_stmt=_get_16_uuid_insert_stmt(ks_name, 'empty_cdc_tab'),
options={'cdc': 'true'}
)
session.execute(empty_cdc_table_info.create_stmt)
# Here, we insert values into the first CDC table until we get a
# WriteFailure. This should happen when the CDC commitlogs take up 1MB
# or more.
debug('flushing non-CDC commitlogs')
node.flush()
# Then, we insert rows into the CDC table until we can't anymore.
debug('beginning data insert to fill CDC commitlogs')
rows_loaded = _write_to_cdc_WriteFailure(session, full_cdc_table_info.insert_stmt)
self.assertLess(0, rows_loaded,
'No CDC rows inserted. This may happen when '
'cdc_total_space_in_mb > commitlog_segment_size_in_mb')
commitlog_dir = os.path.join(node.get_path(), 'commitlogs')
commitlogs_size = size_of_files_in_dir(commitlog_dir)
debug('Commitlog dir ({d}) is {b}B'.format(d=commitlog_dir, b=commitlogs_size))
# We should get a WriteFailure when trying to write to the CDC table
# that's filled the designated CDC space...
with self.assertRaises(WriteFailure):
session.execute(full_cdc_table_info.insert_stmt)
# or any CDC table.
with self.assertRaises(WriteFailure):
session.execute(empty_cdc_table_info.insert_stmt)
# Now we test for behaviors of non-CDC tables when we've exceeded
# cdc_total_space_in_mb.
#
# First, we drain and save the names of all the new discarded CDC
# segments
node.drain()
session.cluster.shutdown()
node.stop()
node.start(wait_for_binary_proto=True)
session = self.patient_cql_connection(node)
pre_non_cdc_write_cdc_raw_segments = _get_cdc_raw_files(node.get_path())
# save the names of all the commitlog segments written up to this
# point:
pre_non_cdc_write_segments = _get_commitlog_files(node.get_path())
# Check that writing to non-CDC tables succeeds even when writes to CDC
# tables are rejected:
non_cdc_prepared_insert = session.prepare(non_cdc_table_info.insert_stmt)
session.execute(non_cdc_prepared_insert, ()) # should not raise an exception
# Check the following property: any new commitlog segments written to
# after cdc_raw has reached its maximum configured size should not be
# moved to cdc_raw, on commitlog discard, because any such commitlog
# segments are written to non-CDC tables.
#
# First, write to non-cdc tables.
start, time_limit = time.time(), 600
rate_limited_debug = get_rate_limited_function(debug, 5)
debug('writing to non-cdc table')
# We write until we get a new commitlog segment.
while _get_commitlog_files(node.get_path()) <= pre_non_cdc_write_segments:
elapsed = time.time() - start
rate_limited_debug(' non-cdc load step has lasted {s:.2f}s'.format(s=elapsed))
self.assertLessEqual(
elapsed, time_limit,
"It's been over a {s}s and we haven't written a new "
"commitlog segment. Something is wrong.".format(s=time_limit)
)
execute_concurrent(
session,
((non_cdc_prepared_insert, ()) for _ in range(1000)),
concurrency=500,
raise_on_first_error=True,
)
# Finally, we check that draining doesn't move any new segments to cdc_raw:
node.drain()
session.cluster.shutdown()
self.assertEqual(pre_non_cdc_write_cdc_raw_segments, _get_cdc_raw_files(node.get_path()))
def _init_new_loading_node(self, ks_name, create_stmt, use_thrift=False):
loading_node = Node(
name='node2',
cluster=self.cluster,
auto_bootstrap=False,
thrift_interface=('127.0.0.2', 9160) if use_thrift else None,
storage_interface=('127.0.0.2', 7000),
jmx_port='7400',
remote_debug_port='0',
initial_token=None,
binary_interface=('127.0.0.2', 9042)
)
debug('adding node')
self.cluster.add(loading_node, is_seed=True)
debug('starting new node')
loading_node.start(wait_for_binary_proto=True)
debug('recreating ks and table')
loading_session = self.patient_exclusive_cql_connection(loading_node)
create_ks(loading_session, ks_name, rf=1)
debug('creating new table')
loading_session.execute(create_stmt)
debug('stopping new node')
loading_node.stop()
loading_session.cluster.shutdown()
return loading_node
def test_cdc_data_available_in_cdc_raw(self):
ks_name = 'ks'
# First, create a new node just for data generation.
generation_node, generation_session = self.prepare(ks_name=ks_name)
cdc_table_info = TableInfo(
ks_name=ks_name, table_name='cdc_tab',
column_spec=_16_uuid_column_spec,
insert_stmt=_get_16_uuid_insert_stmt(ks_name, 'cdc_tab'),
options={
'cdc': 'true',
# give table an explicit id so when we create it again it's the
# same table and we can replay into it
'id': uuid.uuid4()
}
)
# Write until we get a new CL segment to avoid replaying initialization
# mutations from this node's startup into system tables in the other
# node. See CASSANDRA-11811.
advance_to_next_cl_segment(
session=generation_session,
commitlog_dir=os.path.join(generation_node.get_path(), 'commitlogs')
)
generation_session.execute(cdc_table_info.create_stmt)
# insert 10000 rows
inserted_rows = _insert_rows(generation_session, cdc_table_info.name, cdc_table_info.insert_stmt, repeat((), 10000))
# drain the node to guarantee all cl segments will be recycled
debug('draining')
generation_node.drain()
debug('stopping')
# stop the node and clean up all sessions attached to it
generation_node.stop()
generation_session.cluster.shutdown()
# create a new node to use for cdc_raw cl segment replay
loading_node = self._init_new_loading_node(ks_name, cdc_table_info.create_stmt, self.cluster.version() < '4')
# move cdc_raw contents to commitlog directories, then start the
# node again to trigger commitlog replay, which should replay the
# cdc_raw files we moved to commitlogs into memtables.
debug('moving cdc_raw and restarting node')
_move_contents(
os.path.join(generation_node.get_path(), 'cdc_raw'),
os.path.join(loading_node.get_path(), 'commitlogs')
)
loading_node.start(wait_for_binary_proto=True)
debug('node successfully started; waiting on log replay')
loading_node.grep_log('Log replay complete')
debug('log replay complete')
# final assertions
validation_session = self.patient_exclusive_cql_connection(loading_node)
data_in_cdc_table_after_restart = rows_to_list(
validation_session.execute('SELECT * FROM ' + cdc_table_info.name)
)
debug('found {cdc} values in CDC table'.format(
cdc=len(data_in_cdc_table_after_restart)
))
# Then we assert that the CDC data that we expect to be there is there.
# All data that was in CDC tables should have been copied to cdc_raw,
# then used in commitlog replay, so it should be back in the cluster.
self.assertEqual(
inserted_rows,
data_in_cdc_table_after_restart,
# The message on failure is too long, since cdc_data is thousands
# of items, so we print something else here
msg='not all expected data selected'
)
|
{
"content_hash": "e6ab635fd246b52164b2acd0941f382c",
"timestamp": "",
"source": "github",
"line_count": 545,
"max_line_length": 135,
"avg_line_length": 41.086238532110094,
"alnum_prop": 0.607449088960343,
"repo_name": "riptano/cassandra-dtest",
"id": "657e61a3bb173dc451fc61c9ce9b0d9f5ac98430",
"size": "22392",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "cdc_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2350477"
},
{
"name": "Shell",
"bytes": "2035"
}
],
"symlink_target": ""
}
|
"""Implementation of a Tree model for surface data display.
"""
import numpy as np
from PyQt4.QtCore import *
from froi.core.dataobject import Surface
class TreeModel(QAbstractItemModel):
"""Definition of class TreeModel."""
# customized signals
repaint_surface = pyqtSignal()
idChanged = pyqtSignal()
def __init__(self, surfaces, parent=None):
"""Initialize an instance."""
super(TreeModel, self).__init__(parent)
self._data = surfaces
self._point_id = 0
self._current_index = QModelIndex()
def get_data(self):
return self._data
def index(self, row, column, parent):
"""Return the index of item in the model."""
if not self.hasIndex(row, column, parent):
return QModelIndex()
if not parent.isValid():
surf_item = self._data[row]
return self.createIndex(row, column, surf_item)
else:
surf_item = parent.internalPointer()
if surf_item in self._data:
ol_item = surf_item.overlays[surf_item.overlay_count()-1-row]
if ol_item:
return self.createIndex(row, column, ol_item)
else:
return QModelIndex()
else:
return QModelIndex()
def parent(self, index):
"""Return the parent of the model item with the given index."""
if not index.isValid():
return QModelIndex()
item = index.internalPointer()
if item in self._data:
return QModelIndex()
else:
for surf in self._data:
if item in surf.overlays:
return self.createIndex(self._data.index(surf), 0, surf)
def rowCount(self, parent):
"""Return the number of rows for display."""
depth = self.index_depth(parent)
if depth == 0:
return len(self._data)
elif depth == 1:
return parent.internalPointer().overlay_count()
elif depth == 2:
return 0
def columnCount(self, QModelIndex_parent=None, *args, **kwargs):
return 1
def data(self, index, role):
"""Return specific data."""
depth = self.index_depth(index)
if depth == 0:
return None
elif depth == 1:
item = index.internalPointer()
if role == Qt.UserRole + 2:
# FIXME: remove this role after refining the visibility bar's display
return 1.0
elif role == Qt.UserRole + 3:
return item.get_colormap()
elif role == Qt.UserRole + 6:
return item.current_geometry()
elif role == Qt.DisplayRole or role == Qt.EditRole:
return item.hemi_rl
elif depth == 2:
item = index.internalPointer()
if role == Qt.UserRole:
return item.get_vmin()
elif role == Qt.UserRole + 1:
return item.get_vmax()
elif role == Qt.UserRole + 2:
return item.get_alpha()
elif role == Qt.UserRole + 3:
return item.get_colormap()
elif role == Qt.UserRole + 4:
if self._point_id == -1:
return None
return item.get_current_map()[self._point_id]
elif role == Qt.UserRole + 5:
return item.get_data()
elif role == Qt.UserRole + 7:
return item.is_label()
elif role == Qt.UserRole + 8:
return item.is_visible()
elif role == Qt.UserRole + 9:
return item.current_map_index
elif role == Qt.UserRole + 10:
return item.get_current_map()
elif role == Qt.UserRole + 11:
return item.is_series()
elif role == Qt.DisplayRole or role == Qt.EditRole:
return item.get_name()
if role == Qt.CheckStateRole:
if index.column() == 0:
if item.is_visible():
return Qt.Checked
else:
return Qt.Unchecked
def flags(self, index):
"""Return the Qt flags for each data item."""
if not index.isValid():
return Qt.NoItemFlags
result = Qt.ItemIsSelectable | Qt.ItemIsUserCheckable
item = index.internalPointer()
if item not in self._data:
for surf in self._data:
if item in surf.overlays and surf.is_visible():
result |= Qt.ItemIsEnabled
else:
result |= Qt.ItemIsEnabled
return result
def headerData(self, section, orientation, role=None):
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return 'Name'
return None
def setData(self, index, value, role=Qt.EditRole):
if not index.isValid():
return None
item = index.internalPointer()
if role == Qt.CheckStateRole and index.column() == 0:
if value == Qt.Unchecked:
item.set_visible(False)
else:
item.set_visible(True)
if item in self._data:
if role == Qt.UserRole + 3:
if item.get_colormap() != value:
item.set_colormap(value)
else:
return False
else:
if role == Qt.EditRole:
value_str = value.toPyObject()
if value_str != '':
if item.get_name() != value_str:
item.set_name(str(value_str))
else:
return False
else:
return False
elif role == Qt.UserRole:
if 'all' in value:
item.set_vmin(value.strip('all'), 'all')
elif str(item.get_vmin()) != value:
item.set_vmin(value)
else:
return False
elif role == Qt.UserRole + 1:
if 'all' in value:
item.set_vmax(value.strip('all'), 'all')
elif str(item.get_vmax()) != value:
item.set_vmax(value)
else:
return False
elif role == Qt.UserRole + 2:
if item.get_alpha() != value:
item.set_alpha(value)
else:
return False
elif role == Qt.UserRole + 3:
if item.get_colormap() != value:
item.set_colormap(value)
else:
return False
elif role == Qt.UserRole + 4:
if item.current_map_index != value and item.get_data().shape[1] > value:
item.current_map_index = value
self.emit(SIGNAL("mapChanged"))
else:
return False
self.dataChanged.emit(index, index)
self.repaint_surface.emit()
return True
def insertRow(self, row, item, parent):
self.beginInsertRows(parent, row, row)
if isinstance(item, Surface):
self._data.append(item) # insert(row, item)
self.endInsertRows()
def removeRow(self, row, parent):
self.beginRemoveRows(parent, row, row)
item = self.index(row, 0, parent).internalPointer()
parent_item = parent.internalPointer()
if item in self._data:
self._data.remove(item)
else:
parent_item.overlays.remove(item)
self.endRemoveRows()
def moveUp(self, index):
item = index.internalPointer()
row = index.row()
parent = index.parent()
self.beginMoveRows(parent, row, row, parent, row-1)
for surf in self._data:
if item in surf.overlays:
idx = surf.overlays.index(item)
surf.overlay_up(idx)
self.endMoveRows()
self.repaint_surface.emit()
def moveDown(self, index):
item = index.internalPointer()
row = index.row()
parent = index.parent()
self.beginMoveRows(parent, row+1, row+1, parent, row)
for surf in self._data:
if item in surf.overlays:
idx = surf.overlays.index(item)
surf.overlay_down(idx)
self.endMoveRows()
self.repaint_surface.emit()
def setCurrentIndex(self, index):
"""Set current row."""
if -1 <= index.row() <= self.rowCount(index.parent()):
self._current_index = index
self.emit(SIGNAL("currentIndexChanged"))
else:
raise ValueError('Invalid value.')
def current_index(self):
return self._current_index
def get_surface_index(self, index=None):
if index is None:
index = self._current_index
depth = self.index_depth(index)
if depth == 1:
surface_idx = index
elif depth == 2:
surface_idx = self.parent(index)
else:
return None
return surface_idx
def get_overlay_list(self, index=None):
if index is None:
index = self._current_index
overlay_list = []
surface_idx = self.get_surface_index(index)
if surface_idx is None:
return overlay_list
for row in range(self.rowCount(surface_idx)):
idx = self.index(row, 0, surface_idx)
overlay_list.append(self.data(idx, Qt.DisplayRole))
return overlay_list
def index_depth(self, index=None):
"""judge the depth of the index relative to the root"""
if index is None:
index = self._current_index
depth = 0
while True:
if not hasattr(index, "isValid") or not index.isValid():
return depth
else:
index = self.parent(index)
depth += 1
def add_item(self, index, source=None, vmin=None, vmax=None, colormap='jet',
alpha=1.0, visible=True, islabel=False, name=None):
if not index.isValid():
if not isinstance(source, Surface):
source = Surface(source, 0)
self.insertRow(index.row(), source, index)
else:
parent = index.parent()
if not parent.isValid():
surf_item = index.internalPointer()
else:
surf_item = parent.internalPointer()
if source is None:
source = np.zeros((surf_item.vertices_count(),))
surf_item.load_overlay(source, vmin=vmin, vmax=vmax, colormap=colormap, alpha=alpha,
visible=visible, islabel=islabel, name=name)
self.insertRow(index.row(), None, parent)
self.repaint_surface.emit()
return True
def del_item(self, index):
if not index.isValid():
return None
self.removeRow(index.row(), index.parent())
self.repaint_surface.emit()
if len(self._data) == 0:
self.emit(SIGNAL("modelEmpty"))
return True
def set_vertices_value(self, value, index=None, vertices=None, roi=None,
target_row=None):
if index is None:
index = self._current_index
depth = self.index_depth(index)
if depth == 2:
item = index.internalPointer()
if roi is not None:
# change values that are equal to roi to the new value.
vertices = item.get_roi_vertices(roi)
if target_row is None:
target_item = item
else:
target_idx = self.index(target_row, 0, self.parent(index))
target_item = target_idx.internalPointer()
target_item.set_vertices_value(vertices, value)
else:
return None
self.repaint_surface.emit()
def set_point_id(self, point_id):
self._point_id = point_id
self.idChanged.emit()
def get_point_id(self):
return self._point_id
def camera_to_show(self, azimuth, elevation, distance, focalpoint, roll):
self.emit(SIGNAL("camera_to_show"), azimuth, elevation, distance, focalpoint, roll)
def camera_to_edit(self, azimuth, elevation, distance, focalpoint, roll):
self.emit(SIGNAL("camera_to_edit"), azimuth, elevation, distance, focalpoint, roll)
|
{
"content_hash": "6ae63c5f09f59045e4078fec63284704",
"timestamp": "",
"source": "github",
"line_count": 367,
"max_line_length": 96,
"avg_line_length": 34.321525885558586,
"alnum_prop": 0.5227850111146396,
"repo_name": "sunshineDrizzle/FreeROI",
"id": "971cece38b8ff0609a987c7a64adce1911fdf92f",
"size": "12711",
"binary": false,
"copies": "2",
"ref": "refs/heads/surface_lab",
"path": "froi/widgets/treemodel.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "827149"
},
{
"name": "Shell",
"bytes": "302"
}
],
"symlink_target": ""
}
|
'''
importer mixpanel lib
~~~~~~~~~~~~~~~~~~~~~
pre-installed mixpanel lib.
'''
|
{
"content_hash": "144143db594b3b640f95303d4507c132",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 31,
"avg_line_length": 11.875,
"alnum_prop": 0.4421052631578947,
"repo_name": "mindis/Keen-Importer",
"id": "46d92d722da9b9672ce6c598bc872efab8e494bf",
"size": "120",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "importer/lib/mixpanel/__init__.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from setuptools import setup
setup(
name = 'cenum',
packages = ['cenum'],
)
|
{
"content_hash": "6c6bbf0b839d51db7b8958a7dec5d310",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 28,
"avg_line_length": 15.166666666666666,
"alnum_prop": 0.5714285714285714,
"repo_name": "zmic/cenum",
"id": "e986b212fb70c85540c513ea43f35b089c11cc5e",
"size": "91",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "18507"
},
{
"name": "Python",
"bytes": "15089"
}
],
"symlink_target": ""
}
|
import os, os.path
# The base directory of pnntprss data
base_dir = os.path.join(os.environ['HOME'], ".pnntprss")
# The directory containing group data
groups_dir = os.path.join(base_dir, "groups")
# default feed polling interval
feed_poll_interval = 1800
# how long an article lives for. may be overridden in group config.
# None means forever
article_lifetime = None
# user-agent string
user_agent = "pnntprss/0.01 +http://david.wragg.org/pnntprss/"
# how many feeds to retrieve concurrently when polling all feeds
feed_poll_concurrency = 4
# Logging settings
import logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(name)s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
filename=os.path.join(base_dir, "log"))
def log_to_stderr():
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(logging.Formatter('%(message)s'))
logging.getLogger('').addHandler(console)
def get_logger(name):
return logging.getLogger(name)
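# Example usage (illustrative; the logger name and message are hypothetical):
#   import settings
#   settings.log_to_stderr()              # also mirror log output to the console
#   log = settings.get_logger('poller')
#   log.info('polling %d feeds', 12)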
|
{
"content_hash": "f1478f4074e371d79e429d5d273aa14e",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 68,
"avg_line_length": 27.81578947368421,
"alnum_prop": 0.6868495742667928,
"repo_name": "dpw/pnntprss",
"id": "0b0eeb3b670d73ed9a63af1c6af9e324c02272ad",
"size": "1085",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "213718"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
# Register your models here.
from polls.models import Poll, Choice
class ChoiceInline(admin.TabularInline):
model = Choice
extra = 3
class PollAdmin(admin.ModelAdmin):
fieldsets = [
('Question', {'fields':['question'], 'classes':['collapse']}),
('Date Information', {'fields':['pub_date'], 'classes':['collapse']}),
]
inlines = [ChoiceInline]
list_display = ('question', 'pub_date', 'was_published_recently')
list_filter = ['pub_date']
admin.site.register(Poll, PollAdmin)
|
{
"content_hash": "af9ef5fb451d6def2e95ece77d6abe41",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 72,
"avg_line_length": 24.904761904761905,
"alnum_prop": 0.6940726577437859,
"repo_name": "qianyu668899/Django",
"id": "476a2a969285a4214ff1154c41db5a3d1a940997",
"size": "523",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test1/polls/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "890"
},
{
"name": "C",
"bytes": "447223"
},
{
"name": "C++",
"bytes": "2005"
},
{
"name": "CSS",
"bytes": "24544"
},
{
"name": "HTML",
"bytes": "77630"
},
{
"name": "JavaScript",
"bytes": "374"
},
{
"name": "Nginx",
"bytes": "787"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "209145"
}
],
"symlink_target": ""
}
|
import url
from rdflib import ConjunctiveGraph, URIRef
def is_url(text):
""" Check if the given text looks like a URL. """
if text is None:
return False
text = text.lower()
return text.startswith('http://') or text.startswith('https://') or \
text.startswith('urn:') or text.startswith('file://')
def safe_uriref(text):
""" Escape a URL properly. """
url_ = url.parse(text).sanitize().deuserinfo().canonical()
return URIRef(url_.punycode().unicode())
def sparql_store(query_url, update_url):
gs = ConjunctiveGraph('SPARQLUpdateStore')
gs.open((query_url, update_url))
return gs.store
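# Example usage (illustrative; the endpoint URLs are hypothetical):
#   store = sparql_store('http://localhost:3030/ds/query',
#                        'http://localhost:3030/ds/update')
#   graph = ConjunctiveGraph(store=store)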
class GraphException(Exception):
pass
|
{
"content_hash": "01ef5327c408f5de5ffb53c4a6d65f61",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 73,
"avg_line_length": 25.555555555555557,
"alnum_prop": 0.6550724637681159,
"repo_name": "pudo/jsongraph",
"id": "af7955604830f0e7bb7a5f73921222ab5f940c23",
"size": "690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jsongraph/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "431"
},
{
"name": "Python",
"bytes": "40865"
}
],
"symlink_target": ""
}
|