repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
TeamWin/kernel_samsung_lt02ltetmo | refs/heads/android-4.4 | tools/perf/python/twatch.py | 7370 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
|
chongtianfeiyu/kbengine | refs/heads/master | kbe/src/lib/python/Doc/includes/sqlite3/text_factory.py | 51 | import sqlite3
# Demonstrate sqlite3's text_factory, which selects the Python type that
# TEXT values are converted to when rows are fetched.
con = sqlite3.connect(":memory:")
cur = con.cursor()

AUSTRIA = "\xd6sterreich"

# By default TEXT values come back as str (Unicode).
row = cur.execute("select ?", (AUSTRIA,)).fetchone()
assert row[0] == AUSTRIA

# Setting the factory to bytes makes sqlite3 return bytestrings instead ...
con.text_factory = bytes
row = cur.execute("select ?", (AUSTRIA,)).fetchone()
assert type(row[0]) is bytes
# ... and those bytestrings are UTF-8 encoded, unless garbage was stored
# in the database.
assert row[0] == AUSTRIA.encode("utf-8")

# Any callable accepting the raw UTF-8 bytes can serve as a text_factory;
# this one decodes and appends "foo" to every fetched string.
con.text_factory = lambda x: x.decode("utf-8") + "foo"
row = cur.execute("select ?", ("bar",)).fetchone()
assert row[0] == "barfoo"
|
Workday/OpenFrame | refs/heads/master | third_party/WebKit/Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py | 1 | # -*- coding: utf-8; -*-
#
# Copyright (C) 2011 Google Inc. All rights reserved.
# Copyright (C) 2009 Torch Mobile Inc.
# Copyright (C) 2009 Apple Inc. All rights reserved.
# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for cpp_style.py."""
# FIXME: Add a good test that tests UpdateIncludeState.
import os
import random
import re
import unittest
import cpp as cpp_style
from cpp import CppChecker
from ..filter import FilterConfiguration
from webkitpy.common.system.filesystem import FileSystem
# This class works as an error collector and replaces cpp_style.Error
# function for the unit tests. We also verify each category we see
# is in STYLE_CATEGORIES, to help keep that list up to date.
class ErrorCollector:
    """Stands in for cpp_style's error function during unit tests,
    collecting every reported violation for later inspection."""

    # Every category the checker can emit.
    _all_style_categories = CppChecker.categories
    # Shared across all instances on purpose: accumulates every category
    # seen by any unit test, so coverage can be verified at the end.
    _seen_style_categories = {}

    def __init__(self, assert_fn, filter=None, lines_to_check=None):
        """assert_fn: a function to call when we notice a problem.
        filter: filters the errors that we are concerned about."""
        self._assert_fn = assert_fn
        self._errors = []
        self._lines_to_check = lines_to_check
        self._filter = filter if filter else FilterConfiguration()

    def __call__(self, line_number, category, confidence, message):
        self._assert_fn(category in self._all_style_categories,
                        'Message "%s" has category "%s",'
                        ' which is not in STYLE_CATEGORIES' % (message, category))
        if self._lines_to_check and line_number not in self._lines_to_check:
            return False
        if self._filter.should_check(category, ""):
            self._seen_style_categories[category] = 1
            self._errors.append('%s [%s] [%d]' % (message, category, confidence))
        return True

    def results(self):
        # Most tests expect a plain string; only hand back the list when
        # there is more than one error to report.
        if len(self._errors) > 1:
            return self._errors
        return ''.join(self._errors)

    def result_list(self):
        return self._errors

    def verify_all_categories_are_seen(self):
        """Fails if there's a category in _all_style_categories - _seen_style_categories.

        This should only be called after all tests are run, so
        _seen_style_categories has had a chance to fully populate.  Since
        this isn't called from within the normal unittest framework, we
        can't use the normal unittest assert macros.  Instead we just exit
        when we see an error.  Good thing this test is always run last!
        """
        unseen = [category for category in self._all_style_categories
                  if category not in self._seen_style_categories]
        if unseen:
            import sys
            sys.exit('FATAL ERROR: There are no tests for category "%s"' % unseen[0])
class CppFunctionsTest(unittest.TestCase):
    """Supports testing functions that do not need CppStyleTestBase."""

    def test_convert_to_lower_with_underscores(self):
        """CamelCase is rewritten as lower_with_underscores; template
        brackets are preserved."""
        self.assertEqual(cpp_style._convert_to_lower_with_underscores('ABC'), 'abc')
        self.assertEqual(cpp_style._convert_to_lower_with_underscores('aB'), 'a_b')
        self.assertEqual(cpp_style._convert_to_lower_with_underscores('isAName'), 'is_a_name')
        self.assertEqual(cpp_style._convert_to_lower_with_underscores('AnotherTest'), 'another_test')
        self.assertEqual(cpp_style._convert_to_lower_with_underscores('PassRefPtr<MyClass>'), 'pass_ref_ptr<my_class>')
        self.assertEqual(cpp_style._convert_to_lower_with_underscores('_ABC'), '_abc')

    def test_create_acronym(self):
        """An acronym keeps only the upper-case letters and brackets."""
        self.assertEqual(cpp_style._create_acronym('ABC'), 'ABC')
        self.assertEqual(cpp_style._create_acronym('IsAName'), 'IAN')
        self.assertEqual(cpp_style._create_acronym('PassRefPtr<MyClass>'), 'PRP<MC>')

    def test_is_c_or_objective_c(self):
        """File extension (.c/.m) decides; a .h file counts as
        Objective-C when it contains an #import."""
        clean_lines = cpp_style.CleansedLines([''])
        clean_objc_lines = cpp_style.CleansedLines(['#import "header.h"'])
        self.assertTrue(cpp_style._FileState(clean_lines, 'c').is_c_or_objective_c())
        self.assertTrue(cpp_style._FileState(clean_lines, 'm').is_c_or_objective_c())
        self.assertFalse(cpp_style._FileState(clean_lines, 'cpp').is_c_or_objective_c())
        self.assertFalse(cpp_style._FileState(clean_lines, 'cc').is_c_or_objective_c())
        self.assertFalse(cpp_style._FileState(clean_lines, 'h').is_c_or_objective_c())
        self.assertTrue(cpp_style._FileState(clean_objc_lines, 'h').is_c_or_objective_c())

    def test_parameter(self):
        """Parameter splits a declaration string into type/name/row."""
        # Test type.
        parameter = cpp_style.Parameter('ExceptionCode', 13, 1)
        self.assertEqual(parameter.type, 'ExceptionCode')
        self.assertEqual(parameter.name, '')
        self.assertEqual(parameter.row, 1)

        # Test type and name.
        parameter = cpp_style.Parameter('PassRefPtr<MyClass> parent', 19, 1)
        self.assertEqual(parameter.type, 'PassRefPtr<MyClass>')
        self.assertEqual(parameter.name, 'parent')
        self.assertEqual(parameter.row, 1)

        # Test type, no name, with default value.
        parameter = cpp_style.Parameter('MyClass = 0', 7, 0)
        self.assertEqual(parameter.type, 'MyClass')
        self.assertEqual(parameter.name, '')
        self.assertEqual(parameter.row, 0)

        # Test type, name, and default value.
        parameter = cpp_style.Parameter('MyClass a = 0', 7, 0)
        self.assertEqual(parameter.type, 'MyClass')
        self.assertEqual(parameter.name, 'a')
        self.assertEqual(parameter.row, 0)

    def test_single_line_view(self):
        """SingleLineView flattens a multi-line span; columns in the
        flattened string map back to source rows."""
        start_position = cpp_style.Position(row=1, column=1)
        end_position = cpp_style.Position(row=3, column=1)
        single_line_view = cpp_style.SingleLineView(['0', 'abcde', 'fgh', 'i'], start_position, end_position)
        self.assertEqual(single_line_view.single_line, 'bcde fgh i')
        self.assertEqual(single_line_view.convert_column_to_row(0), 1)
        self.assertEqual(single_line_view.convert_column_to_row(4), 1)
        self.assertEqual(single_line_view.convert_column_to_row(5), 2)
        self.assertEqual(single_line_view.convert_column_to_row(8), 2)
        self.assertEqual(single_line_view.convert_column_to_row(9), 3)
        self.assertEqual(single_line_view.convert_column_to_row(100), 3)

        start_position = cpp_style.Position(row=0, column=3)
        end_position = cpp_style.Position(row=0, column=4)
        single_line_view = cpp_style.SingleLineView(['abcdef'], start_position, end_position)
        self.assertEqual(single_line_view.single_line, 'd')

    def test_create_skeleton_parameters(self):
        """Skeleton parameters strip templates/modifiers so parameter
        boundaries are easy to find."""
        self.assertEqual(cpp_style.create_skeleton_parameters(''), '')
        self.assertEqual(cpp_style.create_skeleton_parameters(' '), ' ')
        self.assertEqual(cpp_style.create_skeleton_parameters('long'), 'long,')
        self.assertEqual(cpp_style.create_skeleton_parameters('const unsigned long int'), ' int,')
        self.assertEqual(cpp_style.create_skeleton_parameters('long int*'), ' int ,')
        self.assertEqual(cpp_style.create_skeleton_parameters('PassRefPtr<Foo> a'), 'PassRefPtr a,')
        self.assertEqual(cpp_style.create_skeleton_parameters(
            'ComplexTemplate<NestedTemplate1<MyClass1, MyClass2>, NestedTemplate1<MyClass1, MyClass2> > param, int second'),
                         'ComplexTemplate param, int second,')
        self.assertEqual(cpp_style.create_skeleton_parameters('int = 0, Namespace::Type& a'), 'int , Type a,')
        # Create skeleton parameters is a bit too aggressive with function variables, but
        # it allows for parsing other parameters and declarations like this are rare.
        self.assertEqual(cpp_style.create_skeleton_parameters('void (*fn)(int a, int b), Namespace::Type& a'),
                         'void , Type a,')

        # This doesn't look like functions declarations but the simplifications help to eliminate false positives.
        self.assertEqual(cpp_style.create_skeleton_parameters('b{d}'), 'b ,')

    def test_find_parameter_name_index(self):
        """Index just past the type is where the parameter name starts."""
        self.assertEqual(cpp_style.find_parameter_name_index(' int a '), 5)
        self.assertEqual(cpp_style.find_parameter_name_index(' PassRefPtr '), 16)
        self.assertEqual(cpp_style.find_parameter_name_index('double'), 6)

    def test_parameter_list(self):
        """parameter_list() parses every parameter between the given
        positions, including multi-line and defaulted ones."""
        elided_lines = ['int blah(PassRefPtr<MyClass> paramName,',
                        'const Other1Class& foo,',
                        'const ComplexTemplate<Class1, NestedTemplate<P1, P2> >* const * param = new ComplexTemplate<Class1, NestedTemplate<P1, P2> >(34, 42),',
                        'int* myCount = 0);']
        start_position = cpp_style.Position(row=0, column=8)
        end_position = cpp_style.Position(row=3, column=16)

        expected_parameters = ({'type': 'PassRefPtr<MyClass>', 'name': 'paramName', 'row': 0},
                               {'type': 'const Other1Class&', 'name': 'foo', 'row': 1},
                               {'type': 'const ComplexTemplate<Class1, NestedTemplate<P1, P2> >* const *', 'name': 'param', 'row': 2},
                               {'type': 'int*', 'name': 'myCount', 'row': 3})
        index = 0
        for parameter in cpp_style.parameter_list(elided_lines, start_position, end_position):
            expected_parameter = expected_parameters[index]
            self.assertEqual(parameter.type, expected_parameter['type'])
            self.assertEqual(parameter.name, expected_parameter['name'])
            self.assertEqual(parameter.row, expected_parameter['row'])
            index += 1
        # Every expected parameter must have been produced.
        self.assertEqual(index, len(expected_parameters))

    def test_check_parameter_against_text(self):
        """A parameter name that merely echoes its type is flagged."""
        error_collector = ErrorCollector(self.assertTrue)
        parameter = cpp_style.Parameter('FooF ooF', 4, 1)
        self.assertFalse(cpp_style._check_parameter_name_against_text(parameter, 'FooF', error_collector))
        self.assertEqual(error_collector.results(),
                         'The parameter name "ooF" adds no information, so it should be removed. [readability/parameter_name] [5]')
class CppStyleTestBase(unittest.TestCase):
    """Provides some useful helper functions for cpp_style tests.

    Attributes:
      min_confidence: An integer that is the current minimum confidence
                      level for the tests.
    """

    # FIXME: Refactor the unit tests so the confidence level is passed
    # explicitly, just like it is in the real code.
    min_confidence = 1

    # Helper function to avoid needing to explicitly pass confidence
    # in all the unit test calls to cpp_style.process_file_data().
    def process_file_data(self, filename, file_extension, lines, error, fs=None):
        """Call cpp_style.process_file_data() with the min_confidence."""
        return cpp_style.process_file_data(filename, file_extension, lines,
                                           error, self.min_confidence, fs)

    def perform_lint(self, code, filename, basic_error_rules, fs=None, lines_to_check=None):
        """Lint |code| as if it were the contents of |filename|, applying
        |basic_error_rules| as the filter, and return the collected results."""
        error_collector = ErrorCollector(self.assertTrue, FilterConfiguration(basic_error_rules), lines_to_check)
        lines = code.split('\n')
        # Take the last dot-separated component so multi-dot names
        # ('foo.bar.cpp') yield the real extension; the previous
        # split('.')[1] grabbed the first component after the base name.
        extension = filename.split('.')[-1]
        self.process_file_data(filename, extension, lines, error_collector, fs)
        return error_collector.results()

    # Perform lint on single line of input and return the error message.
    def perform_single_line_lint(self, code, filename):
        basic_error_rules = ('-build/header_guard',
                             '-legal/copyright',
                             '-readability/fn_size',
                             '-readability/parameter_name',
                             '-readability/pass_ptr',
                             '-whitespace/ending_newline')
        return self.perform_lint(code, filename, basic_error_rules)

    # Perform lint over multiple lines and return the error message.
    def perform_multi_line_lint(self, code, file_extension):
        basic_error_rules = ('-build/header_guard',
                             '-legal/copyright',
                             '-readability/parameter_name',
                             '-whitespace/ending_newline')
        return self.perform_lint(code, 'test.' + file_extension, basic_error_rules)

    # Only keep some errors related to includes, namespaces and rtti.
    def perform_language_rules_check(self, filename, code, lines_to_check=None):
        basic_error_rules = ('-',
                             '+build/include',
                             '+build/include_order',
                             '+build/namespaces',
                             '+runtime/rtti')
        return self.perform_lint(code, filename, basic_error_rules, lines_to_check=lines_to_check)

    # Only keep function length errors.
    def perform_function_lengths_check(self, code):
        basic_error_rules = ('-',
                             '+readability/fn_size')
        return self.perform_lint(code, 'test.cpp', basic_error_rules)

    # Only keep pass ptr errors.
    def perform_pass_ptr_check(self, code):
        basic_error_rules = ('-',
                             '+readability/pass_ptr')
        return self.perform_lint(code, 'test.cpp', basic_error_rules)

    # Only keep leaky pattern errors.
    def perform_leaky_pattern_check(self, code):
        basic_error_rules = ('-',
                             '+runtime/leaky_pattern')
        return self.perform_lint(code, 'test.cpp', basic_error_rules)

    # Only include what you use errors.
    def perform_include_what_you_use(self, code, filename='foo.h', fs=None):
        basic_error_rules = ('-',
                             '+build/include_what_you_use')
        return self.perform_lint(code, filename, basic_error_rules, fs)

    # Only keep runtime/casting errors.
    def perform_avoid_static_cast_of_objects(self, code, filename='foo.cpp', fs=None):
        basic_error_rules = ('-',
                             '+runtime/casting')
        return self.perform_lint(code, filename, basic_error_rules, fs)

    # Perform lint and compare the error message with "expected_message".
    def assert_lint(self, code, expected_message, file_name='foo.cpp'):
        self.assertEqual(expected_message, self.perform_single_line_lint(code, file_name))

    def assert_lint_one_of_many_errors_re(self, code, expected_message_re, file_name='foo.cpp'):
        """Passes if any one of the collected messages matches the regex."""
        messages = self.perform_single_line_lint(code, file_name)
        for message in messages:
            if re.search(expected_message_re, message):
                return
        # No match: fail with a comparison that shows what we got.
        self.assertEqual(expected_message_re, messages)

    def assert_multi_line_lint(self, code, expected_message, file_name='foo.h'):
        file_extension = file_name[file_name.rfind('.') + 1:]
        self.assertEqual(expected_message, self.perform_multi_line_lint(code, file_extension))

    def assert_multi_line_lint_re(self, code, expected_message_re, file_name='foo.h'):
        file_extension = file_name[file_name.rfind('.') + 1:]
        message = self.perform_multi_line_lint(code, file_extension)
        if not re.search(expected_message_re, message):
            self.fail('Message was:\n' + message + 'Expected match to "' + expected_message_re + '"')

    def assert_language_rules_check(self, file_name, code, expected_message, lines_to_check=None):
        self.assertEqual(expected_message,
                         self.perform_language_rules_check(file_name, code, lines_to_check))

    def assert_include_what_you_use(self, code, expected_message):
        self.assertEqual(expected_message,
                         self.perform_include_what_you_use(code))

    def assert_blank_lines_check(self, lines, start_errors, end_errors):
        """Checks the counts of blank-line-at-start/end-of-block errors."""
        error_collector = ErrorCollector(self.assertTrue)
        self.process_file_data('foo.cpp', 'cpp', lines, error_collector)
        self.assertEqual(
            start_errors,
            error_collector.results().count(
                'Blank line at the start of a code block. Is this needed?'
                ' [whitespace/blank_line] [2]'))
        self.assertEqual(
            end_errors,
            error_collector.results().count(
                'Blank line at the end of a code block. Is this needed?'
                ' [whitespace/blank_line] [3]'))

    def assert_positions_equal(self, position, tuple_position):
        """Checks if the two positions are equal.

        position: a cpp_style.Position object.
        tuple_position: a tuple (row, column) to compare against."""
        self.assertEqual(position, cpp_style.Position(tuple_position[0], tuple_position[1]),
                         'position %s, tuple_position %s' % (position, tuple_position))
class FunctionDetectionTest(CppStyleTestBase):
    """Tests cpp_style's function detection and parameter parsing."""

    def perform_function_detection(self, lines, function_information, detection_line=0):
        """Run detect_functions() on |lines| starting at |detection_line|
        and verify the resulting _FunctionState against
        |function_information| (None means no function should be found)."""
        clean_lines = cpp_style.CleansedLines(lines)
        function_state = cpp_style._FunctionState(5)
        error_collector = ErrorCollector(self.assertTrue)
        cpp_style.detect_functions(clean_lines, detection_line, function_state, error_collector)
        if not function_information:
            self.assertEqual(function_state.in_a_function, False)
            return
        self.assertEqual(function_state.in_a_function, True)
        self.assertEqual(function_state.current_function, function_information['name'] + '()')
        self.assertEqual(function_state.modifiers_and_return_type(), function_information['modifiers_and_return_type'])
        self.assertEqual(function_state.is_pure, function_information['is_pure'])
        self.assertEqual(function_state.is_declaration, function_information['is_declaration'])
        self.assert_positions_equal(function_state.function_name_start_position, function_information['function_name_start_position'])
        self.assert_positions_equal(function_state.parameter_start_position, function_information['parameter_start_position'])
        self.assert_positions_equal(function_state.parameter_end_position, function_information['parameter_end_position'])
        self.assert_positions_equal(function_state.body_start_position, function_information['body_start_position'])
        self.assert_positions_equal(function_state.end_position, function_information['end_position'])
        # The parameter list is only checked when the expectation provides one.
        expected_parameters = function_information.get('parameter_list')
        if expected_parameters:
            actual_parameters = function_state.parameter_list()
            self.assertEqual(len(actual_parameters), len(expected_parameters))
            for index in range(len(expected_parameters)):
                actual_parameter = actual_parameters[index]
                expected_parameter = expected_parameters[index]
                self.assertEqual(actual_parameter.type, expected_parameter['type'])
                self.assertEqual(actual_parameter.name, expected_parameter['name'])
                self.assertEqual(actual_parameter.row, expected_parameter['row'])

    def test_basic_function_detection(self):
        """A function with a body is detected as a non-declaration."""
        self.perform_function_detection(
            ['void theTestFunctionName(int) {',
             '}'],
            {'name': 'theTestFunctionName',
             'modifiers_and_return_type': 'void',
             'function_name_start_position': (0, 5),
             'parameter_start_position': (0, 24),
             'parameter_end_position': (0, 29),
             'body_start_position': (0, 30),
             'end_position': (1, 1),
             'is_pure': False,
             'is_declaration': False})

    def test_function_declaration_detection(self):
        """Declarations (ending in ';'), including operator overloads."""
        self.perform_function_detection(
            ['void aFunctionName(int);'],
            {'name': 'aFunctionName',
             'modifiers_and_return_type': 'void',
             'function_name_start_position': (0, 5),
             'parameter_start_position': (0, 18),
             'parameter_end_position': (0, 23),
             'body_start_position': (0, 23),
             'end_position': (0, 24),
             'is_pure': False,
             'is_declaration': True})

        self.perform_function_detection(
            ['CheckedInt<T> operator /(const CheckedInt<T> &lhs, const CheckedInt<T> &rhs);'],
            {'name': 'operator /',
             'modifiers_and_return_type': 'CheckedInt<T>',
             'function_name_start_position': (0, 14),
             'parameter_start_position': (0, 24),
             'parameter_end_position': (0, 76),
             'body_start_position': (0, 76),
             'end_position': (0, 77),
             'is_pure': False,
             'is_declaration': True})

        self.perform_function_detection(
            ['CheckedInt<T> operator -(const CheckedInt<T> &lhs, const CheckedInt<T> &rhs);'],
            {'name': 'operator -',
             'modifiers_and_return_type': 'CheckedInt<T>',
             'function_name_start_position': (0, 14),
             'parameter_start_position': (0, 24),
             'parameter_end_position': (0, 76),
             'body_start_position': (0, 76),
             'end_position': (0, 77),
             'is_pure': False,
             'is_declaration': True})

        self.perform_function_detection(
            ['CheckedInt<T> operator !=(const CheckedInt<T> &lhs, const CheckedInt<T> &rhs);'],
            {'name': 'operator !=',
             'modifiers_and_return_type': 'CheckedInt<T>',
             'function_name_start_position': (0, 14),
             'parameter_start_position': (0, 25),
             'parameter_end_position': (0, 77),
             'body_start_position': (0, 77),
             'end_position': (0, 78),
             'is_pure': False,
             'is_declaration': True})

        self.perform_function_detection(
            ['CheckedInt<T> operator +(const CheckedInt<T> &lhs, const CheckedInt<T> &rhs);'],
            {'name': 'operator +',
             'modifiers_and_return_type': 'CheckedInt<T>',
             'function_name_start_position': (0, 14),
             'parameter_start_position': (0, 24),
             'parameter_end_position': (0, 76),
             'body_start_position': (0, 76),
             'end_position': (0, 77),
             'is_pure': False,
             'is_declaration': True})

    def test_pure_function_detection(self):
        """'= 0' marks a declaration pure; a default argument does not."""
        self.perform_function_detection(
            ['virtual void theTestFunctionName(int = 0);'],
            {'name': 'theTestFunctionName',
             'modifiers_and_return_type': 'virtual void',
             'function_name_start_position': (0, 13),
             'parameter_start_position': (0, 32),
             'parameter_end_position': (0, 41),
             'body_start_position': (0, 41),
             'end_position': (0, 42),
             'is_pure': False,
             'is_declaration': True})

        self.perform_function_detection(
            ['virtual void theTestFunctionName(int) = 0;'],
            {'name': 'theTestFunctionName',
             'modifiers_and_return_type': 'virtual void',
             'function_name_start_position': (0, 13),
             'parameter_start_position': (0, 32),
             'parameter_end_position': (0, 37),
             'body_start_position': (0, 41),
             'end_position': (0, 42),
             'is_pure': True,
             'is_declaration': True})

        # Hopefully, no one writes code like this but it is a tricky case.
        self.perform_function_detection(
            ['virtual void theTestFunctionName(int)',
             ' = ',
             ' 0 ;'],
            {'name': 'theTestFunctionName',
             'modifiers_and_return_type': 'virtual void',
             'function_name_start_position': (0, 13),
             'parameter_start_position': (0, 32),
             'parameter_end_position': (0, 37),
             'body_start_position': (2, 3),
             'end_position': (2, 4),
             'is_pure': True,
             'is_declaration': True})

    def test_ignore_macros(self):
        """A trailing backslash means the line is part of a macro, not a
        function declaration."""
        self.perform_function_detection(['void aFunctionName(int); \\'], None)

    def test_non_functions(self):
        # This case exposed an error because the open brace was in quotes.
        self.perform_function_detection(
            ['asm(',
             ' "stmdb sp!, {r1-r3}" "\n"',
             ');'],
            # This isn't a function but it looks like one to our simple
            # algorithm and that is ok.
            {'name': 'asm',
             'modifiers_and_return_type': '',
             'function_name_start_position': (0, 0),
             'parameter_start_position': (0, 3),
             'parameter_end_position': (2, 1),
             'body_start_position': (2, 1),
             'end_position': (2, 2),
             'is_pure': False,
             'is_declaration': True})

        # Simple test case with something that is not a function.
        self.perform_function_detection(['class Stuff;'], None)

    def test_parameter_list(self):
        """Parameter lists of increasing complexity are parsed correctly."""
        # A function with no arguments.
        function_state = self.perform_function_detection(
            ['void functionName();'],
            {'name': 'functionName',
             'modifiers_and_return_type': 'void',
             'function_name_start_position': (0, 5),
             'parameter_start_position': (0, 17),
             'parameter_end_position': (0, 19),
             'body_start_position': (0, 19),
             'end_position': (0, 20),
             'is_pure': False,
             'is_declaration': True,
             'parameter_list': ()})

        # A function with one argument.
        function_state = self.perform_function_detection(
            ['void functionName(int);'],
            {'name': 'functionName',
             'modifiers_and_return_type': 'void',
             'function_name_start_position': (0, 5),
             'parameter_start_position': (0, 17),
             'parameter_end_position': (0, 22),
             'body_start_position': (0, 22),
             'end_position': (0, 23),
             'is_pure': False,
             'is_declaration': True,
             'parameter_list':
                 ({'type': 'int', 'name': '', 'row': 0},)})

        # A function with unsigned and short arguments
        function_state = self.perform_function_detection(
            ['void functionName(unsigned a, short b, long c, long long short unsigned int);'],
            {'name': 'functionName',
             'modifiers_and_return_type': 'void',
             'function_name_start_position': (0, 5),
             'parameter_start_position': (0, 17),
             'parameter_end_position': (0, 76),
             'body_start_position': (0, 76),
             'end_position': (0, 77),
             'is_pure': False,
             'is_declaration': True,
             'parameter_list':
                 ({'type': 'unsigned', 'name': 'a', 'row': 0},
                  {'type': 'short', 'name': 'b', 'row': 0},
                  {'type': 'long', 'name': 'c', 'row': 0},
                  {'type': 'long long short unsigned int', 'name': '', 'row': 0})})

        # Some parameter type with modifiers and no parameter names.
        function_state = self.perform_function_detection(
            ['virtual void determineARIADropEffects(Vector<String>*&, const unsigned long int*&, const MediaPlayer::Preload, Other<Other2, Other3<P1, P2> >, int);'],
            {'name': 'determineARIADropEffects',
             'modifiers_and_return_type': 'virtual void',
             'parameter_start_position': (0, 37),
             'function_name_start_position': (0, 13),
             'parameter_end_position': (0, 147),
             'body_start_position': (0, 147),
             'end_position': (0, 148),
             'is_pure': False,
             'is_declaration': True,
             'parameter_list':
                 ({'type': 'Vector<String>*&', 'name': '', 'row': 0},
                  {'type': 'const unsigned long int*&', 'name': '', 'row': 0},
                  {'type': 'const MediaPlayer::Preload', 'name': '', 'row': 0},
                  {'type': 'Other<Other2, Other3<P1, P2> >', 'name': '', 'row': 0},
                  {'type': 'int', 'name': '', 'row': 0})})

        # Try parsing a function with a very complex definition.
        function_state = self.perform_function_detection(
            ['#define MyMacro(a) a',
             'virtual',
             'AnotherTemplate<Class1, Class2> aFunctionName(PassRefPtr<MyClass> paramName,',
             'const Other1Class& foo,',
             'const ComplexTemplate<Class1, NestedTemplate<P1, P2> >* const * param = new ComplexTemplate<Class1, NestedTemplate<P1, P2> >(34, 42),',
             'int* myCount = 0);'],
            {'name': 'aFunctionName',
             'modifiers_and_return_type': 'virtual AnotherTemplate<Class1, Class2>',
             'function_name_start_position': (2, 32),
             'parameter_start_position': (2, 45),
             'parameter_end_position': (5, 17),
             'body_start_position': (5, 17),
             'end_position': (5, 18),
             'is_pure': False,
             'is_declaration': True,
             'parameter_list':
                 ({'type': 'PassRefPtr<MyClass>', 'name': 'paramName', 'row': 2},
                  {'type': 'const Other1Class&', 'name': 'foo', 'row': 3},
                  {'type': 'const ComplexTemplate<Class1, NestedTemplate<P1, P2> >* const *', 'name': 'param', 'row': 4},
                  {'type': 'int*', 'name': 'myCount', 'row': 5})},
            detection_line=2)
class CppStyleTest(CppStyleTestBase):
def test_asm_lines_ignored(self):
    """MSVC-style __asm lines must produce no style errors."""
    self.assert_lint(
        '__asm mov [registration], eax',
        '')
# Test get line width.
def test_get_line_width(self):
    """get_line_width() counts wide (CJK) characters as two columns."""
    self.assertEqual(0, cpp_style.get_line_width(''))
    self.assertEqual(10, cpp_style.get_line_width(u'x' * 10))
    self.assertEqual(16, cpp_style.get_line_width(u'都|道|府|県|支庁'))
def test_find_next_multi_line_comment_start(self):
    """Returns the index of the next '/*' line, or len(lines) if none."""
    self.assertEqual(1, cpp_style.find_next_multi_line_comment_start([''], 0))

    lines = ['a', 'b', '/* c']
    self.assertEqual(2, cpp_style.find_next_multi_line_comment_start(lines, 0))

    lines = ['char a[] = "/*";']  # not recognized as comment.
    self.assertEqual(1, cpp_style.find_next_multi_line_comment_start(lines, 0))
def test_find_next_multi_line_comment_end(self):
    """Returns the index of the next '*/' line, or len(lines) if none."""
    self.assertEqual(1, cpp_style.find_next_multi_line_comment_end([''], 0))

    lines = ['a', 'b', ' c */']
    self.assertEqual(2, cpp_style.find_next_multi_line_comment_end(lines, 0))
def test_remove_multi_line_comments_from_range(self):
    """Comment lines in [1, 4) are replaced with '// dummy' in place."""
    lines = ['a', ' /* comment ', ' * still comment', ' comment */ ', 'b']
    cpp_style.remove_multi_line_comments_from_range(lines, 1, 4)
    self.assertEqual(['a', '// dummy', '// dummy', '// dummy', 'b'], lines)
def test_position(self):
    """Position exposes row/column, compares row-first, and has a
    '(row, column)' string form."""
    position = cpp_style.Position(3, 4)
    self.assert_positions_equal(position, (3, 4))
    self.assertEqual(position.row, 3)
    # Ordering is by row first, then column.
    self.assertTrue(position > cpp_style.Position(position.row - 1, position.column + 1))
    self.assertTrue(position > cpp_style.Position(position.row, position.column - 1))
    self.assertTrue(position < cpp_style.Position(position.row, position.column + 1))
    self.assertTrue(position < cpp_style.Position(position.row + 1, position.column - 1))
    self.assertEqual(position.__str__(), '(3, 4)')
def test_rfind_in_lines(self):
    """_rfind_in_lines searches backwards from start_position and yields
    not_found_position when the regex never matches."""
    not_found_position = cpp_style.Position(10, 11)
    start_position = cpp_style.Position(2, 2)
    lines = ['ab', 'ace', 'test']
    self.assertEqual(not_found_position, cpp_style._rfind_in_lines('st', lines, start_position, not_found_position))
    self.assertTrue(cpp_style.Position(1, 1) == cpp_style._rfind_in_lines('a', lines, start_position, not_found_position))
    self.assertEqual(cpp_style.Position(2, 2), cpp_style._rfind_in_lines('(te|a)', lines, start_position, not_found_position))
def test_close_expression(self):
    """close_expression() returns the position just past the matching
    close bracket; an unmatched open yields column -1 on a later row."""
    self.assertEqual(cpp_style.Position(1, -1), cpp_style.close_expression([')('], cpp_style.Position(0, 1)))
    self.assertEqual(cpp_style.Position(1, -1), cpp_style.close_expression([') ()'], cpp_style.Position(0, 1)))
    self.assertEqual(cpp_style.Position(0, 4), cpp_style.close_expression([')[)]'], cpp_style.Position(0, 1)))
    self.assertEqual(cpp_style.Position(0, 5), cpp_style.close_expression(['}{}{}'], cpp_style.Position(0, 3)))
    self.assertEqual(cpp_style.Position(1, 1), cpp_style.close_expression(['}{}{', '}'], cpp_style.Position(0, 3)))
    self.assertEqual(cpp_style.Position(2, -1), cpp_style.close_expression(['][][', ' '], cpp_style.Position(0, 3)))
def test_spaces_at_end_of_line(self):
    """Trailing whitespace on a line is flagged."""
    self.assert_lint(
        '// Hello there ',
        'Line ends in whitespace. Consider deleting these extra spaces.'
        ' [whitespace/end_of_line] [4]')
# Test C-style cast cases.
def test_cstyle_cast(self):
    """C-style casts are flagged with a suggested C++ cast; in .c files
    the suggestion is reinterpret_cast for pointer casts."""
    self.assert_lint(
        'int a = (int)1.0;',
        'Using C-style cast. Use static_cast<int>(...) instead'
        ' [readability/casting] [4]')
    self.assert_lint(
        'int *a = (int *)DEFINED_VALUE;',
        'Using C-style cast. Use reinterpret_cast<int *>(...) instead'
        ' [readability/casting] [4]', 'foo.c')
    self.assert_lint(
        'uint16 a = (uint16)1.0;',
        'Using C-style cast. Use static_cast<uint16>(...) instead'
        ' [readability/casting] [4]')
    self.assert_lint(
        'int32 a = (int32)1.0;',
        'Using C-style cast. Use static_cast<int32>(...) instead'
        ' [readability/casting] [4]')
    self.assert_lint(
        'uint64 a = (uint64)1.0;',
        'Using C-style cast. Use static_cast<uint64>(...) instead'
        ' [readability/casting] [4]')
# Test taking address of casts (runtime/casting)
def test_runtime_casting(self):
# Taking the address OF a cast result is flagged (the cast may yield a
# temporary); casting an address is fine, as the last case shows.
self.assert_lint(
'int* x = &static_cast<int*>(foo);',
'Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after'
' [runtime/casting] [4]')
# dynamic_cast additionally draws the runtime/rtti error.
self.assert_lint(
'int* x = &dynamic_cast<int *>(foo);',
['Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after'
' [runtime/casting] [4]',
'Do not use dynamic_cast<>. If you need to cast within a class '
'hierarchy, use static_cast<> to upcast. Google doesn\'t support '
'RTTI. [runtime/rtti] [5]'])
self.assert_lint(
'int* x = &reinterpret_cast<int *>(foo);',
'Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after'
' [runtime/casting] [4]')
# It's OK to cast an address.
self.assert_lint(
'int* x = reinterpret_cast<int *>(&foo);',
'')
def test_runtime_selfinit(self):
# Initializer lists like r_(r_) initialize a member with itself and are
# flagged; r_(r) and chained member-to-member inits are accepted.
self.assert_lint(
'Foo::Foo(Bar r, Bel l) : r_(r_), l_(l_) { }',
'You seem to be initializing a member variable with itself.'
' [runtime/init] [4]')
self.assert_lint(
'Foo::Foo(Bar r, Bel l) : r_(r), l_(l) { }',
'')
self.assert_lint(
'Foo::Foo(Bar r) : r_(r), l_(r_), ll_(l_) { }',
'')
def test_runtime_rtti(self):
# dynamic_cast is rejected in both implementation and header files.
statement = 'int* x = dynamic_cast<int*>(&foo);'
error_message = (
'Do not use dynamic_cast<>. If you need to cast within a class '
'hierarchy, use static_cast<> to upcast. Google doesn\'t support '
'RTTI. [runtime/rtti] [5]')
# dynamic_cast is disallowed in most files.
self.assert_language_rules_check('foo.cpp', statement, error_message)
self.assert_language_rules_check('foo.h', statement, error_message)
# Tests for static_cast readability.
def test_static_cast_on_objects_with_toFoo(self):
# When the (mocked) header already declares a toFoo() helper, the lint
# message tells the author to use it instead of static_cast.
mock_header_contents = ['inline Foo* toFoo(Bar* bar)']
fs = FileSystem()
orig_read_text_file_fn = fs.read_text_file
def mock_read_text_file_fn(path):
return mock_header_contents
try:
# Patch the filesystem so the checker "reads" our fake header;
# restored in the finally block below.
fs.read_text_file = mock_read_text_file_fn
message = self.perform_avoid_static_cast_of_objects(
'Foo* x = static_cast<Foo*>(bar);',
filename='casting.cpp',
fs=fs)
self.assertEqual(message, 'static_cast of class objects is not allowed. Use toFoo defined in Foo.h.'
' [runtime/casting] [4]')
finally:
fs.read_text_file = orig_read_text_file_fn
def test_static_cast_on_objects_without_toFoo(self):
# Counterpart to the test above: the mocked header declares toFooBar (not
# toFoo), so the message asks the author to ADD a toFoo helper.
mock_header_contents = ['inline FooBar* toFooBar(Bar* bar)']
fs = FileSystem()
orig_read_text_file_fn = fs.read_text_file
def mock_read_text_file_fn(path):
return mock_header_contents
try:
fs.read_text_file = mock_read_text_file_fn
message = self.perform_avoid_static_cast_of_objects(
'Foo* x = static_cast<Foo*>(bar);',
filename='casting.cpp',
fs=fs)
self.assertEqual(message, 'static_cast of class objects is not allowed. Add toFoo in Foo.h and use it instead.'
' [runtime/casting] [4]')
finally:
fs.read_text_file = orig_read_text_file_fn
# We cannot test this functionality because of differences in
# function definitions. Anyway, we may never enable this.
#
# # Test for unnamed arguments in a method.
# def test_check_for_unnamed_params(self):
# message = ('All parameters should be named in a function'
# ' [readability/function] [3]')
# self.assert_lint('virtual void A(int*) const;', message)
# self.assert_lint('virtual void B(void (*fn)(int*));', message)
# self.assert_lint('virtual void C(int*);', message)
# self.assert_lint('void *(*f)(void *) = x;', message)
# self.assert_lint('void Method(char*) {', message)
# self.assert_lint('void Method(char*);', message)
# self.assert_lint('void Method(char* /*x*/);', message)
# self.assert_lint('typedef void (*Method)(int32);', message)
# self.assert_lint('static void operator delete[](void*) throw();', message)
#
# self.assert_lint('virtual void D(int* p);', '')
# self.assert_lint('void operator delete(void* x) throw();', '')
# self.assert_lint('void Method(char* x)\n{', '')
# self.assert_lint('void Method(char* /*x*/)\n{', '')
# self.assert_lint('void Method(char* x);', '')
# self.assert_lint('typedef void (*Method)(int32 x);', '')
# self.assert_lint('static void operator delete[](void* x) throw();', '')
# self.assert_lint('static void operator delete[](void* /*x*/) throw();', '')
#
# # This one should technically warn, but doesn't because the function
# # pointer is confusing.
# self.assert_lint('virtual void E(void (*fn)(int* p));', '')
# Test deprecated casts such as int(d)
def test_deprecated_cast(self):
# Function-style casts like int(2.2) are flagged; constructor calls,
# default-constructed members, and conversion operators must not be.
self.assert_lint(
'int a = int(2.2);',
'Using deprecated casting style. '
'Use static_cast<int>(...) instead'
' [readability/casting] [4]')
# Checks for false positives...
self.assert_lint(
'int a = int(); // Constructor, o.k.',
'')
self.assert_lint(
'X::X() : a(int()) { } // default Constructor, o.k.',
'')
self.assert_lint(
'operator bool(); // Conversion operator, o.k.',
'')
# The second parameter to a gMock method definition is a function signature
# that often looks like a bad cast but should not picked up by lint.
def test_mock_method(self):
# gMock MOCK_METHOD* signatures such as int() must not be mistaken for
# deprecated function-style casts.
self.assert_lint(
'MOCK_METHOD0(method, int());',
'')
self.assert_lint(
'MOCK_CONST_METHOD1(method, float(string));',
'')
self.assert_lint(
'MOCK_CONST_METHOD2_T(method, double(float, float));',
'')
# Test sizeof(type) cases.
def test_sizeof_type(self):
# sizeof on a type name (rather than a variable) gets a low-severity
# runtime/sizeof suggestion.
self.assert_lint(
'sizeof(int);',
'Using sizeof(type). Use sizeof(varname) instead if possible'
' [runtime/sizeof] [1]')
self.assert_lint(
'sizeof(int *);',
'Using sizeof(type). Use sizeof(varname) instead if possible'
' [runtime/sizeof] [1]')
# Test typedef cases. There was a bug that cpp_style misidentified
# typedef for pointer to function as C-style cast and produced
# false-positive error messages.
def test_typedef_for_pointer_to_function(self):
# Regression test: none of these typedef forms may be reported as casts.
self.assert_lint(
'typedef void (*Func)(int x);',
'')
self.assert_lint(
'typedef void (*Func)(int *x);',
'')
self.assert_lint(
'typedef void Func(int x);',
'')
self.assert_lint(
'typedef void Func(int *x);',
'')
def test_include_what_you_use_no_implementation_files(self):
# The include-what-you-use check fires for headers but is suppressed for
# .cpp files (their header is assumed to provide the include).
code = 'std::vector<int> foo;'
self.assertEqual('Add #include <vector> for vector<>'
' [build/include_what_you_use] [4]',
self.perform_include_what_you_use(code, 'foo.h'))
self.assertEqual('',
self.perform_include_what_you_use(code, 'foo.cpp'))
def test_include_what_you_use(self):
# Exercises the include-what-you-use checker: code using std containers /
# algorithms must include the header that declares them. Cases that
# already include the right header (or use macros / member calls that
# merely resemble std names) must produce no message. The tail of the
# test patches FileSystem.read_text_file so the checker can "read"
# synthetic headers and update its include state across files.
self.assert_include_what_you_use(
'''#include <vector>
std::vector<int> foo;
''',
'')
self.assert_include_what_you_use(
'''#include <map>
std::pair<int,int> foo;
''',
'')
self.assert_include_what_you_use(
'''#include <multimap>
std::pair<int,int> foo;
''',
'')
self.assert_include_what_you_use(
'''#include <hash_map>
std::pair<int,int> foo;
''',
'')
self.assert_include_what_you_use(
'''#include <utility>
std::pair<int,int> foo;
''',
'')
self.assert_include_what_you_use(
'''#include <vector>
DECLARE_string(foobar);
''',
'')
self.assert_include_what_you_use(
'''#include <vector>
DEFINE_string(foobar, "", "");
''',
'')
self.assert_include_what_you_use(
'''#include <vector>
std::pair<int,int> foo;
''',
'Add #include <utility> for pair<>'
' [build/include_what_you_use] [4]')
self.assert_include_what_you_use(
'''#include "base/foobar.h"
std::vector<int> foo;
''',
'Add #include <vector> for vector<>'
' [build/include_what_you_use] [4]')
self.assert_include_what_you_use(
'''#include <vector>
std::set<int> foo;
''',
'Add #include <set> for set<>'
' [build/include_what_you_use] [4]')
self.assert_include_what_you_use(
'''#include "base/foobar.h"
hash_map<int, int> foobar;
''',
'Add #include <hash_map> for hash_map<>'
' [build/include_what_you_use] [4]')
self.assert_include_what_you_use(
'''#include "base/foobar.h"
bool foobar = std::less<int>(0,1);
''',
'Add #include <functional> for less<>'
' [build/include_what_you_use] [4]')
self.assert_include_what_you_use(
'''#include "base/foobar.h"
bool foobar = min<int>(0,1);
''',
'Add #include <algorithm> for min [build/include_what_you_use] [4]')
self.assert_include_what_you_use(
'void a(const string &foobar);',
'Add #include <string> for string [build/include_what_you_use] [4]')
self.assert_include_what_you_use(
'''#include "base/foobar.h"
bool foobar = swap(0,1);
''',
'Add #include <algorithm> for swap [build/include_what_you_use] [4]')
self.assert_include_what_you_use(
'''#include "base/foobar.h"
bool foobar = transform(a.begin(), a.end(), b.start(), Foo);
''',
'Add #include <algorithm> for transform '
'[build/include_what_you_use] [4]')
self.assert_include_what_you_use(
'''#include "base/foobar.h"
bool foobar = min_element(a.begin(), a.end());
''',
'Add #include <algorithm> for min_element '
'[build/include_what_you_use] [4]')
self.assert_include_what_you_use(
'''foo->swap(0,1);
foo.swap(0,1);
''',
'')
self.assert_include_what_you_use(
'''#include <string>
void a(const std::multimap<int,string> &foobar);
''',
'Add #include <map> for multimap<>'
' [build/include_what_you_use] [4]')
self.assert_include_what_you_use(
'''#include <queue>
void a(const std::priority_queue<int> &foobar);
''',
'')
self.assert_include_what_you_use(
'''#include "base/basictypes.h"
#include "base/port.h"
#include <assert.h>
#include <string>
#include <vector>
vector<string> hajoa;''', '')
self.assert_include_what_you_use(
'''#include <string>
int i = numeric_limits<int>::max()
''',
'Add #include <limits> for numeric_limits<>'
' [build/include_what_you_use] [4]')
self.assert_include_what_you_use(
'''#include <limits>
int i = numeric_limits<int>::max()
''',
'')
# Test the UpdateIncludeState code path.
mock_header_contents = ['#include "blah/foo.h"', '#include "blah/bar.h"']
fs = FileSystem()
orig_read_text_file_fn = fs.read_text_file
def mock_read_text_file_fn(path):
return mock_header_contents
try:
fs.read_text_file = mock_read_text_file_fn
message = self.perform_include_what_you_use(
'#include "config.h"\n'
'#include "blah/a.h"\n',
filename='blah/a.cpp',
fs=fs)
self.assertEqual(message, '')
mock_header_contents = ['#include <set>']
message = self.perform_include_what_you_use(
'''#include "config.h"
#include "blah/a.h"
std::set<int> foo;''',
filename='blah/a.cpp',
fs=fs)
self.assertEqual(message, '')
# If there's just a .cpp and the header can't be found then it's ok.
message = self.perform_include_what_you_use(
'''#include "config.h"
#include "blah/a.h"
std::set<int> foo;''',
filename='blah/a.cpp')
self.assertEqual(message, '')
# Make sure we find the headers with relative paths.
mock_header_contents = ['']
message = self.perform_include_what_you_use(
'''#include "config.h"
#include "%s%sa.h"
std::set<int> foo;''' % (os.path.basename(os.getcwd()), os.path.sep),
filename='a.cpp',
fs=fs)
self.assertEqual(message, 'Add #include <set> for set<> '
'[build/include_what_you_use] [4]')
finally:
fs.read_text_file = orig_read_text_file_fn
def test_files_belong_to_same_module(self):
# files_belong_to_same_module returns (belong, common_prefix); it must
# pair a .cpp with its .h through _test/_unittest/-inl suffixes and
# internal/public directory splits, and report the shared path prefix.
f = cpp_style.files_belong_to_same_module
self.assertEqual((True, ''), f('a.cpp', 'a.h'))
self.assertEqual((True, ''), f('base/google.cpp', 'base/google.h'))
self.assertEqual((True, ''), f('base/google_test.cpp', 'base/google.h'))
self.assertEqual((True, ''),
f('base/google_unittest.cpp', 'base/google.h'))
self.assertEqual((True, ''),
f('base/internal/google_unittest.cpp',
'base/public/google.h'))
self.assertEqual((True, 'xxx/yyy/'),
f('xxx/yyy/base/internal/google_unittest.cpp',
'base/public/google.h'))
self.assertEqual((True, 'xxx/yyy/'),
f('xxx/yyy/base/google_unittest.cpp',
'base/public/google.h'))
self.assertEqual((True, ''),
f('base/google_unittest.cpp', 'base/google-inl.h'))
self.assertEqual((True, '/home/build/google3/'),
f('/home/build/google3/base/google.cpp', 'base/google.h'))
self.assertEqual((False, ''),
f('/home/build/google3/base/google.cpp', 'basu/google.h'))
self.assertEqual((False, ''), f('a.cpp', 'b.h'))
def test_cleanse_line(self):
# cleanse_comments strips // and /*...*/ comments; note the trailing
# space kept in the first case and the collapsed spacing in the others.
self.assertEqual('int foo = 0; ',
cpp_style.cleanse_comments('int foo = 0; // danger!'))
self.assertEqual('int o = 0;',
cpp_style.cleanse_comments('int /* foo */ o = 0;'))
self.assertEqual('foo(int a, int b);',
cpp_style.cleanse_comments('foo(int a /* abc */, int b);'))
self.assertEqual('f(a, b);',
cpp_style.cleanse_comments('f(a, /* name */ b);'))
self.assertEqual('f(a, b);',
cpp_style.cleanse_comments('f(a /* name */, b);'))
self.assertEqual('f(a, b);',
cpp_style.cleanse_comments('f(a, /* name */b);'))
def test_multi_line_comments(self):
# Code inside a closed /*...*/ block must not be linted; an unterminated
# multi-line comment yields both the "could not find end" error and the
# "complex multi-line comment" warning.
# missing explicit is bad
self.assert_multi_line_lint(
r'''int a = 0;
/* multi-liner
class Foo {
Foo(int f); // should cause a lint warning in code
}
*/ ''',
'')
self.assert_multi_line_lint(
'''\
/* int a = 0; multi-liner
static const int b = 0;''',
['Could not find end of multi-line comment'
' [readability/multiline_comment] [5]',
'Complex multi-line /*...*/-style comment found. '
'Lint may give bogus warnings. Consider replacing these with '
'//-style comments, with #if 0...#endif, or with more clearly '
'structured multi-line comments. [readability/multiline_comment] [5]'])
self.assert_multi_line_lint(r''' /* multi-line comment''',
['Could not find end of multi-line comment'
' [readability/multiline_comment] [5]',
'Complex multi-line /*...*/-style comment found. '
'Lint may give bogus warnings. Consider replacing these with '
'//-style comments, with #if 0...#endif, or with more clearly '
'structured multi-line comments. [readability/multiline_comment] [5]'])
self.assert_multi_line_lint(r''' // /* comment, but not multi-line''', '')
def test_multiline_strings(self):
# A C string continued across lines with a backslash is reported once
# per physical line (hence the expected count of 2).
multiline_string_error_message = (
'Multi-line string ("...") found. This lint script doesn\'t '
'do well with such strings, and may give bogus warnings. They\'re '
'ugly and unnecessary, and you should use concatenation instead".'
' [readability/multiline_string] [5]')
file_path = 'mydir/foo.cpp'
error_collector = ErrorCollector(self.assertTrue)
self.process_file_data(file_path, 'cpp',
['const char* str = "This is a\\',
' multiline string.";'],
error_collector)
self.assertEqual(
2, # One per line.
error_collector.result_list().count(multiline_string_error_message))
# Test non-explicit single-argument constructors
def test_explicit_single_argument_constructors(self):
# Single-argument constructors without `explicit` are flagged across
# classes, structs, templates, and qualified names; the long tail of
# cases pins down the non-constructor lookalikes that must stay clean
# (copy constructors, two-arg constructors, methods, comments, etc.).
# missing explicit is bad
self.assert_multi_line_lint(
'''\
class Foo {
Foo(int f);
};''',
'Single-argument constructors should be marked explicit.'
' [runtime/explicit] [5]')
# missing explicit is bad, even with whitespace
self.assert_multi_line_lint(
'''\
class Foo {
Foo (int f);
};''',
['Extra space before ( in function call [whitespace/parens] [4]',
'Single-argument constructors should be marked explicit.'
' [runtime/explicit] [5]'])
# missing explicit, with distracting comment, is still bad
self.assert_multi_line_lint(
'''\
class Foo {
Foo(int f); // simpler than Foo(blargh, blarg)
};''',
'Single-argument constructors should be marked explicit.'
' [runtime/explicit] [5]')
# missing explicit, with qualified classname
self.assert_multi_line_lint(
'''\
class Qualifier::AnotherOne::Foo {
Foo(int f);
};''',
'Single-argument constructors should be marked explicit.'
' [runtime/explicit] [5]')
# structs are caught as well.
self.assert_multi_line_lint(
'''\
struct Foo {
Foo(int f);
};''',
'Single-argument constructors should be marked explicit.'
' [runtime/explicit] [5]')
# Templatized classes are caught as well.
self.assert_multi_line_lint(
'''\
template<typename T> class Foo {
Foo(int f);
};''',
'Single-argument constructors should be marked explicit.'
' [runtime/explicit] [5]')
# proper style is okay
self.assert_multi_line_lint(
'''\
class Foo {
explicit Foo(int f);
};''',
'')
# two argument constructor is okay
self.assert_multi_line_lint(
'''\
class Foo {
Foo(int f, int b);
};''',
'')
# two argument constructor, across two lines, is okay
self.assert_multi_line_lint(
'''\
class Foo {
Foo(int f,
int b);
};''',
'')
# non-constructor (but similar name), is okay
self.assert_multi_line_lint(
'''\
class Foo {
aFoo(int f);
};''',
'')
# constructor with void argument is okay
self.assert_multi_line_lint(
'''\
class Foo {
Foo(void);
};''',
'')
# single argument method is okay
self.assert_multi_line_lint(
'''\
class Foo {
Bar(int b);
};''',
'')
# comments should be ignored
self.assert_multi_line_lint(
'''\
class Foo {
// Foo(int f);
};''',
'')
# single argument function following class definition is okay
# (okay, it's not actually valid, but we don't want a false positive)
self.assert_multi_line_lint(
'''\
class Foo {
Foo(int f, int b);
};
Foo(int f);''',
'')
# single argument function is okay
self.assert_multi_line_lint(
'''static Foo(int f);''',
'')
# single argument copy constructor is okay.
self.assert_multi_line_lint(
'''\
class Foo {
Foo(const Foo&);
};''',
'')
self.assert_multi_line_lint(
'''\
class Foo {
Foo(Foo&);
};''',
'')
def test_slash_star_comment_on_single_line(self):
# A /*...*/ comment closed on the same line is fine; the /*/ form only
# opens a comment, so the last two cases are unterminated.
self.assert_multi_line_lint(
'''/* static */ Foo(int f);''',
'')
self.assert_multi_line_lint(
'''/*/ static */ Foo(int f);''',
'')
self.assert_multi_line_lint(
'''/*/ static Foo(int f);''',
'Could not find end of multi-line comment'
' [readability/multiline_comment] [5]')
self.assert_multi_line_lint(
''' /*/ static Foo(int f);''',
'Could not find end of multi-line comment'
' [readability/multiline_comment] [5]')
# Test suspicious usage of "if" like this:
# if (a == b) {
# DoSomething();
# } if (a == c) { // Should be "else if".
# DoSomething(); // This gets called twice if a == b && a == c.
# }
def test_suspicious_usage_of_if(self):
# "} if" on one line usually means a forgotten "else".
self.assert_lint(
' if (a == b) {',
'')
self.assert_lint(
' } if (a == b) {',
'Did you mean "else if"? If not, start a new line for "if".'
' [readability/braces] [4]')
# Test suspicious usage of memset. Specifically, a 0
# as the final argument is almost certainly an error.
def test_suspicious_usage_of_memset(self):
# memset(buf, <non-literal>, 0) almost certainly has the last two
# arguments swapped; a literal second argument with length 0 is allowed
# because test code legitimately uses that form.
# Normal use is okay.
self.assert_lint(
' memset(buf, 0, sizeof(buf))',
'')
# A 0 as the final argument is almost certainly an error.
self.assert_lint(
' memset(buf, sizeof(buf), 0)',
'Did you mean "memset(buf, 0, sizeof(buf))"?'
' [runtime/memset] [4]')
self.assert_lint(
' memset(buf, xsize * ysize, 0)',
'Did you mean "memset(buf, 0, xsize * ysize)"?'
' [runtime/memset] [4]')
# There is legitimate test code that uses this form.
# This is okay since the second argument is a literal.
self.assert_lint(
" memset(buf, 'y', 0)",
'')
self.assert_lint(
' memset(buf, 4, 0)',
'')
self.assert_lint(
' memset(buf, -1, 0)',
'')
self.assert_lint(
' memset(buf, 0xF1, 0)',
'')
self.assert_lint(
' memset(buf, 0xcd, 0)',
'')
def test_check_posix_threading(self):
# Non-thread-safe POSIX calls (rand, strtok) draw a suggestion for the
# _r variant; names merely containing/suffixed with those tokens do not.
self.assert_lint('sctime_r()', '')
self.assert_lint('strtok_r()', '')
self.assert_lint(' strtok_r(foo, ba, r)', '')
self.assert_lint('brand()', '')
self.assert_lint('_rand()', '')
self.assert_lint('.rand()', '')
self.assert_lint('>rand()', '')
self.assert_lint('rand()',
'Consider using rand_r(...) instead of rand(...)'
' for improved thread safety.'
' [runtime/threadsafe_fn] [2]')
self.assert_lint('strtok()',
'Consider using strtok_r(...) '
'instead of strtok(...)'
' for improved thread safety.'
' [runtime/threadsafe_fn] [2]')
# Test potential format string bugs like printf(foo).
def test_format_strings(self):
# A non-literal first argument to printf/StringPrintf is a format-string
# bug; literal formats and printf-like identifiers are fine.
self.assert_lint('printf("foo")', '')
self.assert_lint('printf("foo: %s", foo)', '')
self.assert_lint('DocidForPrintf(docid)', '') # Should not trigger.
self.assert_lint(
'printf(foo)',
'Potential format string bug. Do printf("%s", foo) instead.'
' [runtime/printf] [4]')
self.assert_lint(
'printf(foo.c_str())',
'Potential format string bug. '
'Do printf("%s", foo.c_str()) instead.'
' [runtime/printf] [4]')
self.assert_lint(
'printf(foo->c_str())',
'Potential format string bug. '
'Do printf("%s", foo->c_str()) instead.'
' [runtime/printf] [4]')
self.assert_lint(
'StringPrintf(foo)',
'Potential format string bug. Do StringPrintf("%s", foo) instead.'
''
' [runtime/printf] [4]')
# Variable-length arrays are not permitted.
def test_variable_length_array_detection(self):
# Array sizes that are runtime expressions are flagged; compile-time
# constants (literals, kConstants, ALL_CAPS, sizeof forms) are not.
# `delete a[x]` / `return a[x]` are indexing, not declarations.
errmsg = ('Do not use variable-length arrays. Use an appropriately named '
"('k' followed by CamelCase) compile-time constant for the size."
' [runtime/arrays] [1]')
self.assert_lint('int a[any_old_variable];', errmsg)
self.assert_lint('int doublesize[some_var * 2];', errmsg)
self.assert_lint('int a[afunction()];', errmsg)
self.assert_lint('int a[function(kMaxFooBars)];', errmsg)
self.assert_lint('bool aList[items_->size()];', errmsg)
self.assert_lint('namespace::Type buffer[len+1];', errmsg)
self.assert_lint('int a[64];', '')
self.assert_lint('int a[0xFF];', '')
self.assert_lint('int first[256], second[256];', '')
self.assert_lint('int arrayName[kCompileTimeConstant];', '')
self.assert_lint('char buf[somenamespace::kBufSize];', '')
self.assert_lint('int arrayName[ALL_CAPS];', '')
self.assert_lint('AClass array1[foo::bar::ALL_CAPS];', '')
self.assert_lint('int a[kMaxStrLen + 1];', '')
self.assert_lint('int a[sizeof(foo)];', '')
self.assert_lint('int a[sizeof(*foo)];', '')
self.assert_lint('int a[sizeof foo];', '')
self.assert_lint('int a[sizeof(struct Foo)];', '')
self.assert_lint('int a[128 - sizeof(const bar)];', '')
self.assert_lint('int a[(sizeof(foo) * 4)];', '')
self.assert_lint('int a[(arraysize(fixed_size_array)/2) << 1];', 'Missing spaces around / [whitespace/operators] [3]')
self.assert_lint('delete a[some_var];', '')
self.assert_lint('return a[some_var];', '')
# Brace usage
def test_braces(self):
# Brace-initialized arrays (including nested and multi-line forms) must
# not be mistaken for stray "};" constructs.
# Braces shouldn't be followed by a ; unless they're defining a struct
# or initializing an array
self.assert_lint('int a[3] = { 1, 2, 3 };', '')
self.assert_lint(
'''\
const int foo[] =
{1, 2, 3 };''',
'')
# For single line, unmatched '}' with a ';' is ignored (not enough context)
self.assert_multi_line_lint(
'''\
int a[3] = { 1,
2,
3 };''',
'')
self.assert_multi_line_lint(
'''\
int a[2][3] = { { 1, 2 },
{ 3, 4 } };''',
'')
self.assert_multi_line_lint(
'''\
int a[2][3] =
{ { 1, 2 },
{ 3, 4 } };''',
'')
# CHECK/EXPECT_TRUE/EXPECT_FALSE replacements
def test_check_check(self):
# CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE/ASSERT_FALSE (and _M variants)
# wrapping a binary comparison should use the dedicated _EQ/_NE/_GE/
# _GT/_LE/_LT macro; note the FALSE forms suggest the INVERTED
# comparison. Iterator comparisons and shift expressions are exempt.
self.assert_lint('CHECK(x == 42)',
'Consider using CHECK_EQ instead of CHECK(a == b)'
' [readability/check] [2]')
self.assert_lint('CHECK(x != 42)',
'Consider using CHECK_NE instead of CHECK(a != b)'
' [readability/check] [2]')
self.assert_lint('CHECK(x >= 42)',
'Consider using CHECK_GE instead of CHECK(a >= b)'
' [readability/check] [2]')
self.assert_lint('CHECK(x > 42)',
'Consider using CHECK_GT instead of CHECK(a > b)'
' [readability/check] [2]')
self.assert_lint('CHECK(x <= 42)',
'Consider using CHECK_LE instead of CHECK(a <= b)'
' [readability/check] [2]')
self.assert_lint('CHECK(x < 42)',
'Consider using CHECK_LT instead of CHECK(a < b)'
' [readability/check] [2]')
self.assert_lint('DCHECK(x == 42)',
'Consider using DCHECK_EQ instead of DCHECK(a == b)'
' [readability/check] [2]')
self.assert_lint('DCHECK(x != 42)',
'Consider using DCHECK_NE instead of DCHECK(a != b)'
' [readability/check] [2]')
self.assert_lint('DCHECK(x >= 42)',
'Consider using DCHECK_GE instead of DCHECK(a >= b)'
' [readability/check] [2]')
self.assert_lint('DCHECK(x > 42)',
'Consider using DCHECK_GT instead of DCHECK(a > b)'
' [readability/check] [2]')
self.assert_lint('DCHECK(x <= 42)',
'Consider using DCHECK_LE instead of DCHECK(a <= b)'
' [readability/check] [2]')
self.assert_lint('DCHECK(x < 42)',
'Consider using DCHECK_LT instead of DCHECK(a < b)'
' [readability/check] [2]')
self.assert_lint(
'EXPECT_TRUE("42" == x)',
'Consider using EXPECT_EQ instead of EXPECT_TRUE(a == b)'
' [readability/check] [2]')
self.assert_lint(
'EXPECT_TRUE("42" != x)',
'Consider using EXPECT_NE instead of EXPECT_TRUE(a != b)'
' [readability/check] [2]')
self.assert_lint(
'EXPECT_TRUE(+42 >= x)',
'Consider using EXPECT_GE instead of EXPECT_TRUE(a >= b)'
' [readability/check] [2]')
self.assert_lint(
'EXPECT_TRUE_M(-42 > x)',
'Consider using EXPECT_GT_M instead of EXPECT_TRUE_M(a > b)'
' [readability/check] [2]')
self.assert_lint(
'EXPECT_TRUE_M(42U <= x)',
'Consider using EXPECT_LE_M instead of EXPECT_TRUE_M(a <= b)'
' [readability/check] [2]')
self.assert_lint(
'EXPECT_TRUE_M(42L < x)',
'Consider using EXPECT_LT_M instead of EXPECT_TRUE_M(a < b)'
' [readability/check] [2]')
self.assert_lint(
'EXPECT_FALSE(x == 42)',
'Consider using EXPECT_NE instead of EXPECT_FALSE(a == b)'
' [readability/check] [2]')
self.assert_lint(
'EXPECT_FALSE(x != 42)',
'Consider using EXPECT_EQ instead of EXPECT_FALSE(a != b)'
' [readability/check] [2]')
self.assert_lint(
'EXPECT_FALSE(x >= 42)',
'Consider using EXPECT_LT instead of EXPECT_FALSE(a >= b)'
' [readability/check] [2]')
self.assert_lint(
'ASSERT_FALSE(x > 42)',
'Consider using ASSERT_LE instead of ASSERT_FALSE(a > b)'
' [readability/check] [2]')
self.assert_lint(
'ASSERT_FALSE(x <= 42)',
'Consider using ASSERT_GT instead of ASSERT_FALSE(a <= b)'
' [readability/check] [2]')
self.assert_lint(
'ASSERT_FALSE_M(x < 42)',
'Consider using ASSERT_GE_M instead of ASSERT_FALSE_M(a < b)'
' [readability/check] [2]')
self.assert_lint('CHECK(some_iterator == obj.end())', '')
self.assert_lint('EXPECT_TRUE(some_iterator == obj.end())', '')
self.assert_lint('EXPECT_FALSE(some_iterator == obj.end())', '')
self.assert_lint('CHECK(CreateTestFile(dir, (1 << 20)));', '')
self.assert_lint('CHECK(CreateTestFile(dir, (1 >> 20)));', '')
self.assert_lint('CHECK(x<42)',
['Missing spaces around <'
' [whitespace/operators] [3]',
'Consider using CHECK_LT instead of CHECK(a < b)'
' [readability/check] [2]'])
self.assert_lint('CHECK(x>42)',
'Consider using CHECK_GT instead of CHECK(a > b)'
' [readability/check] [2]')
self.assert_lint(
' EXPECT_TRUE(42 < x) // Random comment.',
'Consider using EXPECT_LT instead of EXPECT_TRUE(a < b)'
' [readability/check] [2]')
self.assert_lint(
'EXPECT_TRUE( 42 < x )',
['Extra space after ( in function call'
' [whitespace/parens] [4]',
'Consider using EXPECT_LT instead of EXPECT_TRUE(a < b)'
' [readability/check] [2]'])
self.assert_lint(
'CHECK("foo" == "foo")',
'Consider using CHECK_EQ instead of CHECK(a == b)'
' [readability/check] [2]')
self.assert_lint('CHECK_EQ("foo", "foo")', '')
def test_brace_at_begin_of_line(self):
# WebKit style: control-flow braces go at end of line, but function
# definition braces belong on their own line — including for const/
# override/final-qualified signatures and multi-line conditions.
self.assert_lint('{',
'This { should be at the end of the previous line'
' [whitespace/braces] [4]')
self.assert_multi_line_lint(
'#endif\n'
'{\n'
'}\n',
'')
self.assert_multi_line_lint(
'if (condition) {',
'')
self.assert_multi_line_lint(
' MACRO1(macroArg) {',
'')
self.assert_multi_line_lint(
'ACCESSOR_GETTER(MessageEventPorts) {',
'Place brace on its own line for function definitions. [whitespace/braces] [4]')
self.assert_multi_line_lint(
'int foo() {',
'Place brace on its own line for function definitions. [whitespace/braces] [4]')
self.assert_multi_line_lint(
'int foo() const {',
'Place brace on its own line for function definitions. [whitespace/braces] [4]')
self.assert_multi_line_lint(
'int foo() override {',
'Place brace on its own line for function definitions. [whitespace/braces] [4]')
self.assert_multi_line_lint(
'int foo() final {',
'Place brace on its own line for function definitions. [whitespace/braces] [4]')
self.assert_multi_line_lint(
'int foo() const\n'
'{\n'
'}\n',
'')
self.assert_multi_line_lint(
'int foo() override\n'
'{\n'
'}\n',
'')
self.assert_multi_line_lint(
'int foo() final\n'
'{\n'
'}\n',
'')
self.assert_multi_line_lint(
'if (condition\n'
' && condition2\n'
' && condition3) {\n'
'}\n',
'')
self.assert_multi_line_lint(
'if (condition) {\n'
' {\n'
' }\n',
'')
self.assert_multi_line_lint(
'int foo()\n'
'{\n'
' {\n'
' }\n'
'}\n',
'')
def test_mismatching_spaces_in_parens(self):
# Spaces just inside control-statement parens must be symmetric; `for`
# allows a leading "( ;" form, but any unmatched trailing space before
# the closing paren is an error.
self.assert_lint('if (foo ) {', 'Extra space before ) in if'
' [whitespace/parens] [5]')
self.assert_lint('switch ( foo) {', 'Extra space after ( in switch'
' [whitespace/parens] [5]')
self.assert_lint('for (foo; ba; bar ) {', 'Extra space before ) in for'
' [whitespace/parens] [5]')
self.assert_lint('for ((foo); (ba); (bar) ) {', 'Extra space before ) in for'
' [whitespace/parens] [5]')
self.assert_lint('for (; foo; bar) {', '')
self.assert_lint('for (; (foo); (bar)) {', '')
self.assert_lint('for ( ; foo; bar) {', '')
self.assert_lint('for ( ; (foo); (bar)) {', '')
self.assert_lint('for ( ; foo; bar ) {', 'Extra space before ) in for'
' [whitespace/parens] [5]')
self.assert_lint('for ( ; (foo); (bar) ) {', 'Extra space before ) in for'
' [whitespace/parens] [5]')
self.assert_lint('for (foo; bar; ) {', '')
self.assert_lint('for ((foo); (bar); ) {', '')
self.assert_lint('foreach (foo, foos ) {', 'Extra space before ) in foreach'
' [whitespace/parens] [5]')
self.assert_lint('foreach ( foo, foos) {', 'Extra space after ( in foreach'
' [whitespace/parens] [5]')
self.assert_lint('while ( foo) {', 'Extra space after ( in while'
' [whitespace/parens] [5]')
def test_spacing_for_fncall(self):
# Function calls must have no space before "(" or just after it; control
# keywords, preprocessor lines, and function-pointer/typedef declarators
# are exempt.
self.assert_lint('if (foo) {', '')
self.assert_lint('for (foo;bar;baz) {', '')
self.assert_lint('foreach (foo, foos) {', '')
self.assert_lint('while (foo) {', '')
self.assert_lint('switch (foo) {', '')
self.assert_lint('new (RenderArena()) RenderInline(document())', '')
self.assert_lint('foo( bar)', 'Extra space after ( in function call'
' [whitespace/parens] [4]')
# NOTE(review): the next assertion is duplicated verbatim below —
# probably an accidental paste; harmless but worth cleaning up upstream.
self.assert_lint('foobar( \\', '')
self.assert_lint('foobar( \\', '')
self.assert_lint('( a + b)', 'Extra space after ('
' [whitespace/parens] [2]')
self.assert_lint('((a+b))', '')
self.assert_lint('foo (foo)', 'Extra space before ( in function call'
' [whitespace/parens] [4]')
self.assert_lint('#elif (foo(bar))', '')
self.assert_lint('#elif (foo(bar) && foo(baz))', '')
self.assert_lint('typedef foo (*foo)(foo)', '')
self.assert_lint('typedef foo (*foo12bar_)(foo)', '')
self.assert_lint('typedef foo (Foo::*bar)(foo)', '')
self.assert_lint('foo (Foo::*bar)(',
'Extra space before ( in function call'
' [whitespace/parens] [4]')
self.assert_lint('typedef foo (Foo::*bar)(', '')
self.assert_lint('(foo)(bar)', '')
self.assert_lint('Foo (*foo)(bar)', '')
self.assert_lint('Foo (*foo)(Bar bar,', '')
self.assert_lint('char (*p)[sizeof(foo)] = &foo', '')
self.assert_lint('char (&ref)[sizeof(foo)] = &foo', '')
self.assert_lint('const char32 (*table[])[6];', '')
def test_spacing_before_braces(self):
# An opening brace needs a preceding space; EXPECT_DEBUG_DEATH({ is a
# known macro idiom and is exempt.
self.assert_lint('if (foo){', 'Missing space before {'
' [whitespace/braces] [5]')
self.assert_lint('for{', 'Missing space before {'
' [whitespace/braces] [5]')
self.assert_lint('for {', '')
self.assert_lint('EXPECT_DEBUG_DEATH({', '')
def test_spacing_between_braces(self):
# "{ }" and "{}" are fine; more than one space inside the pair is not.
self.assert_lint(' { }', '')
self.assert_lint(' {}', '')
self.assert_lint(' { }', 'Too many spaces inside { }. [whitespace/braces] [5]')
def test_spacing_around_else(self):
# "else" needs a space on both sides: after the closing "}" and before
# the opening "{".
self.assert_lint('}else {', 'Missing space before else'
' [whitespace/braces] [5]')
self.assert_lint('} else{', 'Missing space before {'
' [whitespace/braces] [5]')
self.assert_lint('} else {', '')
self.assert_lint('} else if', '')
def test_spacing_for_binary_ops(self):
    """Binary/assignment operators need spaces on both sides; template angle
    brackets must not be mistaken for comparison operators, and >>/<:: template
    bracket styles are enforced (whitespace/operators, readability/templatebrackets).
    """
    self.assert_lint('if (foo<=bar) {', 'Missing spaces around <='
                     ' [whitespace/operators] [3]')
    self.assert_lint('if (foo<bar) {', 'Missing spaces around <'
                     ' [whitespace/operators] [3]')
    self.assert_lint('if (foo<bar->baz) {', 'Missing spaces around <'
                     ' [whitespace/operators] [3]')
    self.assert_lint('if (foo<bar->bar) {', 'Missing spaces around <'
                     ' [whitespace/operators] [3]')
    self.assert_lint('typedef hash_map<Foo, Bar', 'Missing spaces around <'
                     ' [whitespace/operators] [3]')
    # Trailing comma disambiguates: this is a template argument list, not '<'.
    self.assert_lint('typedef hash_map<FoooooType, BaaaaarType,', '')
    self.assert_lint('a<Foo> t+=b;', 'Missing spaces around +='
                     ' [whitespace/operators] [3]')
    self.assert_lint('a<Foo> t-=b;', 'Missing spaces around -='
                     ' [whitespace/operators] [3]')
    self.assert_lint('a<Foo*> t*=b;', 'Missing spaces around *='
                     ' [whitespace/operators] [3]')
    self.assert_lint('a<Foo*> t/=b;', 'Missing spaces around /='
                     ' [whitespace/operators] [3]')
    self.assert_lint('a<Foo*> t|=b;', 'Missing spaces around |='
                     ' [whitespace/operators] [3]')
    self.assert_lint('a<Foo*> t&=b;', 'Missing spaces around &='
                     ' [whitespace/operators] [3]')
    self.assert_lint('a<Foo*> t<<=b;', 'Missing spaces around <<='
                     ' [whitespace/operators] [3]')
    self.assert_lint('a<Foo*> t>>=b;', 'Missing spaces around >>='
                     ' [whitespace/operators] [3]')
    self.assert_lint('a<Foo*> t>>=&b|c;', 'Missing spaces around >>='
                     ' [whitespace/operators] [3]')
    self.assert_lint('a<Foo*> t<<=*b/c;', 'Missing spaces around <<='
                     ' [whitespace/operators] [3]')
    self.assert_lint('a<Foo> t -= b;', '')
    self.assert_lint('a<Foo> t += b;', '')
    self.assert_lint('a<Foo*> t *= b;', '')
    self.assert_lint('a<Foo*> t /= b;', '')
    self.assert_lint('a<Foo*> t |= b;', '')
    self.assert_lint('a<Foo*> t &= b;', '')
    self.assert_lint('a<Foo*> t <<= b;', '')
    self.assert_lint('a<Foo*> t >>= b;', '')
    # Unary &/* immediately after an operand's operator still flag the binary op.
    self.assert_lint('a<Foo*> t >>= &b|c;', 'Missing spaces around |'
                     ' [whitespace/operators] [3]')
    self.assert_lint('a<Foo*> t <<= *b/c;', 'Missing spaces around /'
                     ' [whitespace/operators] [3]')
    self.assert_lint('a<Foo*> t <<= b/c; //Test', [
        'Should have a space between // and comment '
        '[whitespace/comments] [4]', 'Missing'
        ' spaces around / [whitespace/operators] [3]'])
    self.assert_lint('a<Foo*> t <<= b||c;  //Test', ['One space before end'
                     ' of line comments [whitespace/comments] [5]',
                     'Should have a space between // and comment '
                     '[whitespace/comments] [4]',
                     'Missing spaces around || [whitespace/operators] [3]'])
    self.assert_lint('a<Foo*> t <<= b&&c; // Test', 'Missing spaces around'
                     ' && [whitespace/operators] [3]')
    self.assert_lint('a<Foo*> t <<= b&&&c; // Test', 'Missing spaces around'
                     ' && [whitespace/operators] [3]')
    self.assert_lint('a<Foo*> t <<= b&&*c; // Test', 'Missing spaces around'
                     ' && [whitespace/operators] [3]')
    self.assert_lint('a<Foo*> t <<= b && *c; // Test', '')
    self.assert_lint('a<Foo*> t <<= b && &c; // Test', '')
    self.assert_lint('a<Foo*> t <<= b || &c; /*Test', 'Complex multi-line '
                     '/*...*/-style comment found. Lint may give bogus '
                     'warnings. Consider replacing these with //-style'
                     ' comments, with #if 0...#endif, or with more clearly'
                     ' structured multi-line comments. [readability/multiline_comment] [5]')
    self.assert_lint('a<Foo&> t <<= &b | &c;', '')
    self.assert_lint('a<Foo*> t <<= &b & &c; // Test', '')
    self.assert_lint('a<Foo*> t <<= *b / &c; // Test', '')
    self.assert_lint('if (a=b == 1)', 'Missing spaces around = [whitespace/operators] [4]')
    self.assert_lint('a = 1<<20', 'Missing spaces around << [whitespace/operators] [3]')
    self.assert_lint('a = 1>> 20', 'Missing spaces around >> [whitespace/operators] [3]')
    self.assert_lint('a = 1 >>20', 'Missing spaces around >> [whitespace/operators] [3]')
    self.assert_lint('a = 1>>20', 'Missing spaces around >> [whitespace/operators] [3]')
    self.assert_lint('func(OwnPtr<Vector<Foo>>)', '')
    self.assert_lint('func(OwnPtr<Vector<Foo>> foo)', '')
    self.assert_lint('func(OwnPtr<HashMap<Foo, Member<Bar>>>)', '')
    self.assert_lint('func(OwnPtr<Vector<Foo> >)', 'Use >> for ending template instead of > >. [readability/templatebrackets] [3]')
    self.assert_lint('func(OwnPtr<HashMap<Foo, Member<Bar>> >)', 'Use >> for ending template instead of > >. [readability/templatebrackets] [3]')
    self.assert_lint('func(OwnPtr<HashMap<Foo, Member<Bar> >>)', 'Use >> for ending template instead of > >. [readability/templatebrackets] [3]')
    self.assert_lint('func(OwnPtr<HashMap<Foo, Member<Bar> > >)', 'Use >> for ending template instead of > >. [readability/templatebrackets] [3]')
    self.assert_lint('Vector< ::Foo>)', 'Use <:: for template start instead of < ::. [readability/templatebrackets] [3]')
    self.assert_lint('Vector<Vector< ::Foo>>)', 'Use <:: for template start instead of < ::. [readability/templatebrackets] [3]')
    # FIXME: The following test should not show any error.
    self.assert_lint('func(OwnPtr<HashMap<Foo, Member<Bar\n    >>>)',
                     'Missing spaces around < [whitespace/operators] [3]')
    self.assert_lint('if (a = b == 1)', '')
    self.assert_lint('a = 1 << 20', '')
    # Angle brackets in include/import directives are never operators.
    self.assert_multi_line_lint('#include <sys/io.h>\n', '')
    self.assert_multi_line_lint('#import <foo/bar.h>\n', '')
def test_operator_methods(self):
    """Operator-overload declarations must not trip any spacing warnings."""
    # Value-returning binary operators.
    for symbol in ('+', '/'):
        self.assert_lint('String operator%s(const String&, const String&);' % symbol, '')
    self.assert_lint('bool operator==(const String&, const String&);', '')
    # Reference-returning compound-assignment operators.
    for symbol in ('-=', '+=', '*=', '%=', '&=', '<<=', '>>=', '|=', '^='):
        self.assert_lint('String& operator%s(const String&, const String&);' % symbol, '')
def test_spacing_before_last_semicolon(self):
    """No space before a statement's final ';'; bare-semicolon empty statements
    should be written as '{ }' instead (whitespace/semicolon)."""
    self.assert_lint('call_function() ;',
                     'Extra space before last semicolon. If this should be an '
                     'empty statement, use { } instead.'
                     ' [whitespace/semicolon] [5]')
    self.assert_lint('while (true) ;',
                     'Extra space before last semicolon. If this should be an '
                     'empty statement, use { } instead.'
                     ' [whitespace/semicolon] [5]')
    self.assert_lint('default:;',
                     'Semicolon defining empty statement. Use { } instead.'
                     ' [whitespace/semicolon] [5]')
    self.assert_lint('      ;',
                     'Line contains only semicolon. If this should be an empty '
                     'statement, use { } instead.'
                     ' [whitespace/semicolon] [5]')
    # A for-statement's empty condition clause is legitimate.
    self.assert_lint('for (int i = 0; ;', '')
# Static or global STL strings.
def test_static_or_global_stlstrings(self):
    """Static/global STL string constants should be C-style char arrays
    (runtime/string); locals, members and functions are exempt."""
    self.assert_lint('string foo;',
                     'For a static/global string constant, use a C style '
                     'string instead: "char foo[]".'
                     ' [runtime/string] [4]')
    self.assert_lint('string kFoo = "hello"; // English',
                     'For a static/global string constant, use a C style '
                     'string instead: "char kFoo[]".'
                     ' [runtime/string] [4]')
    self.assert_lint('static string foo;',
                     'For a static/global string constant, use a C style '
                     'string instead: "static char foo[]".'
                     ' [runtime/string] [4]')
    self.assert_lint('static const string foo;',
                     'For a static/global string constant, use a C style '
                     'string instead: "static const char foo[]".'
                     ' [runtime/string] [4]')
    self.assert_lint('string Foo::bar;',
                     'For a static/global string constant, use a C style '
                     'string instead: "char Foo::bar[]".'
                     ' [runtime/string] [4]')
    # Rare case.
    self.assert_lint('string foo("foobar");',
                     'For a static/global string constant, use a C style '
                     'string instead: "char foo[]".'
                     ' [runtime/string] [4]')
    # Should not catch local or member variables.
    self.assert_lint('    string foo', '')
    # Should not catch functions.
    self.assert_lint('string EmptyString() { return ""; }', '')
    self.assert_lint('string EmptyString () { return ""; }', '')
    self.assert_lint('string VeryLongNameFunctionSometimesEndsWith(\n'
                     '    VeryLongNameType veryLongNameVariable) { }', '')
    self.assert_lint('template<>\n'
                     'string FunctionTemplateSpecialization<SomeType>(\n'
                     '    int x) { return ""; }', '')
    self.assert_lint('template<>\n'
                     'string FunctionTemplateSpecialization<vector<A::B>* >(\n'
                     '    int x) { return ""; }', '')
    # should not catch methods of template classes.
    self.assert_lint('string Class<Type>::Method() const\n'
                     '{\n'
                     '    return "";\n'
                     '}\n', '')
    self.assert_lint('string Class<Type>::Method(\n'
                     '    int arg) const\n'
                     '{\n'
                     '    return "";\n'
                     '}\n', '')
def test_no_spaces_in_function_calls(self):
    """No space before ')' in function-call argument lists (whitespace/parens)."""
    self.assert_lint('TellStory(1, 3);',
                     '')
    self.assert_lint('TellStory(1, 3 );',
                     'Extra space before )'
                     ' [whitespace/parens] [2]')
    # Inline comments inside the argument list are fine.
    self.assert_lint('TellStory(1 /* wolf */, 3 /* pigs */);',
                     '')
    self.assert_multi_line_lint('#endif\n    );',
                                '')
def test_one_spaces_between_code_and_comments(self):
    """End-of-line '//' comments must be separated from code by exactly one
    space; '//' inside string literals is ignored (whitespace/comments)."""
    self.assert_lint('} // namespace foo',
                     '')
    self.assert_lint('}// namespace foo',
                     'One space before end of line comments'
                     ' [whitespace/comments] [5]')
    self.assert_lint('printf("foo"); // Outside quotes.',
                     '')
    self.assert_lint('int i = 0; // Having one space is fine.','')
    self.assert_lint('int i = 0;  // Having two spaces is bad.',
                     'One space before end of line comments'
                     ' [whitespace/comments] [5]')
    self.assert_lint('int i = 0;   // Having three spaces is bad.',
                     'One space before end of line comments'
                     ' [whitespace/comments] [5]')
    self.assert_lint('// Top level comment', '')
    self.assert_lint('    // Line starts with four spaces.', '')
    # A comment opening a scope is not an end-of-line comment.
    self.assert_lint('foo();\n'
                     '{ // A scope is opening.', '')
    self.assert_lint('    foo();\n'
                     '    { // An indented scope is opening.', '')
    self.assert_lint('if (foo) { // not a pure scope',
                     '')
    # '//' inside string literals must not be treated as comments.
    self.assert_lint('printf("// In quotes.")', '')
    self.assert_lint('printf("\\"%s // In quotes.")', '')
    self.assert_lint('printf("%s", "// In quotes.")', '')
def test_line_ending_in_whitespace(self):
    """Trailing whitespace at end of line is flagged (whitespace/end_of_line)."""
    self.assert_lint('int a; // This is a sentence.',
                     '')
    # Note the trailing spaces inside the literal below — they are the test.
    self.assert_lint('int a; // This is a sentence.  ',
                     'Line ends in whitespace. Consider deleting these extra spaces. [whitespace/end_of_line] [4]')
def test_space_after_comment_marker(self):
    """'//' must be followed by a space, except for separator lines made of
    punctuation (----, ====) and doc-style runs of slashes (whitespace/comments)."""
    self.assert_lint('//', '')
    self.assert_lint('//x', 'Should have a space between // and comment'
                     ' [whitespace/comments] [4]')
    self.assert_lint('// x', '')
    self.assert_lint('//----', '')
    self.assert_lint('//====', '')
    self.assert_lint('//////', '')
    self.assert_lint('////// x', '')
    self.assert_lint('/// x', '')
    self.assert_lint('////x', 'Should have a space between // and comment'
                     ' [whitespace/comments] [4]')
def test_newline_at_eof(self):
    """Files must end with a newline; the warning is emitted exactly once."""
    def do_test(self, data, is_missing_eof):
        # Helper: lint `data` and check the ending-newline warning count.
        error_collector = ErrorCollector(self.assertTrue)
        self.process_file_data('foo.cpp', 'cpp', data.split('\n'),
                               error_collector)
        # The warning appears only once.
        self.assertEqual(
            int(is_missing_eof),
            error_collector.results().count(
                'Could not find a newline character at the end of the file.'
                ' [whitespace/ending_newline] [5]'))
    do_test(self, '// Newline\n// at EOF\n', False)
    do_test(self, '// No newline\n// at EOF', True)
def test_invalid_utf8(self):
    """Lines containing invalid UTF-8 (or the U+FFFD replacement character
    itself) are flagged exactly once (readability/utf8).

    NOTE: uses the Python 2 `unicode` builtin — this test is py2-only.
    """
    def do_test(self, raw_bytes, has_invalid_utf8):
        error_collector = ErrorCollector(self.assertTrue)
        # 'replace' turns malformed byte sequences into U+FFFD, which the
        # checker then detects.
        self.process_file_data('foo.cpp', 'cpp',
                               unicode(raw_bytes, 'utf8', 'replace').split('\n'),
                               error_collector)
        # The warning appears only once.
        self.assertEqual(
            int(has_invalid_utf8),
            error_collector.results().count(
                'Line contains invalid UTF-8'
                ' (or Unicode replacement character).'
                ' [readability/utf8] [5]'))
    do_test(self, 'Hello world\n', False)
    do_test(self, '\xe9\x8e\xbd\n', False)
    do_test(self, '\xe9x\x8e\xbd\n', True)
    # This is the encoding of the replacement character itself (which
    # you can see by evaluating codecs.getencoder('utf8')(u'\ufffd')).
    do_test(self, '\xef\xbf\xbd\n', True)
def test_is_blank_line(self):
    """cpp_style.is_blank_line: true for empty/whitespace-only lines only.

    Uses assertFalse for the negative cases instead of the original
    assertTrue(not ...) — same semantics, clearer failure messages.
    """
    self.assertTrue(cpp_style.is_blank_line(''))
    self.assertTrue(cpp_style.is_blank_line(' '))
    self.assertTrue(cpp_style.is_blank_line(' \t\r\n'))
    self.assertFalse(cpp_style.is_blank_line('int a;'))
    self.assertFalse(cpp_style.is_blank_line('{'))
def test_blank_lines_check(self):
    """Blank lines right after '{' or right before '}' are flagged; commented
    or string-literal braces don't count as block boundaries."""
    self.assert_blank_lines_check(['{\n', '\n', '\n', '}\n'], 1, 1)
    self.assert_blank_lines_check(['  if (foo) {\n', '\n', '  }\n'], 1, 1)
    self.assert_blank_lines_check(
        ['\n', '// {\n', '\n', '\n', '// Comment\n', '{\n', '}\n'], 0, 0)
    self.assert_blank_lines_check(['\n', 'run("{");\n', '\n'], 0, 0)
    self.assert_blank_lines_check(['\n', '  if (foo) { return 0; }\n', '\n'], 0, 0)
def test_allow_blank_line_before_closing_namespace(self):
    """A blank line before a closing namespace brace is explicitly allowed."""
    error_collector = ErrorCollector(self.assertTrue)
    self.process_file_data('foo.cpp', 'cpp',
                           ['namespace {', '', '} // namespace'],
                           error_collector)
    self.assertEqual(0, error_collector.results().count(
        'Blank line at the end of a code block. Is this needed?'
        ' [whitespace/blank_line] [3]'))
def test_allow_blank_line_before_if_else_chain(self):
    """Blank lines before '} else' continuations are allowed; only the blank
    line before the final closing '}' of the chain is flagged."""
    error_collector = ErrorCollector(self.assertTrue)
    self.process_file_data('foo.cpp', 'cpp',
                           ['if (hoge) {',
                            '',  # No warning
                            '} else if (piyo) {',
                            '',  # No warning
                            '} else if (piyopiyo) {',
                            '  hoge = true;',  # No warning
                            '} else {',
                            '',  # Warning on this line
                            '}'],
                           error_collector)
    self.assertEqual(1, error_collector.results().count(
        'Blank line at the end of a code block. Is this needed?'
        ' [whitespace/blank_line] [3]'))
def test_else_on_same_line_as_closing_braces(self):
    """'else' must appear on the same line as the preceding '}'.

    Fix: the original fixture was missing a comma after "' else {'", so
    Python's implicit string concatenation silently merged it with the
    following '' element and the intended blank line never reached the
    checker. The comma is restored; the warning count under test is
    unaffected (the else-placement warning fires once either way).
    """
    error_collector = ErrorCollector(self.assertTrue)
    self.process_file_data('foo.cpp', 'cpp',
                           ['if (hoge) {',
                            '',
                            '}',
                            ' else {',  # Warning on this line
                            '',
                            '}'],
                           error_collector)
    self.assertEqual(1, error_collector.results().count(
        'An else should appear on the same line as the preceding }'
        ' [whitespace/newline] [4]'))
def test_else_clause_not_on_same_line_as_else(self):
    """The body of an 'else' must be on its own line; 'else if' is exempt and
    identifiers merely containing 'else' must not match."""
    self.assert_lint('    else DoSomethingElse();',
                     'Else clause should never be on same line as else '
                     '(use 2 lines) [whitespace/newline] [4]')
    self.assert_lint('    else ifDoSomethingElse();',
                     'Else clause should never be on same line as else '
                     '(use 2 lines) [whitespace/newline] [4]')
    self.assert_lint('    else if (blah) {', '')
    self.assert_lint('    variable_ends_in_else = true;', '')
def test_comma(self):
    """A comma must be followed by a space, except when followed by a comment."""
    self.assert_lint('a = f(1,2);',
                     'Missing space after , [whitespace/comma] [3]')
    self.assert_lint('int tmp=a,a=b,b=tmp;',
                     ['Missing spaces around = [whitespace/operators] [4]',
                      'Missing space after , [whitespace/comma] [3]'])
    self.assert_lint('f(a, /* name */ b);', '')
    self.assert_lint('f(a, /* name */b);', '')
def test_declaration(self):
    """Exactly one space between a declaration's type (or keyword) and name."""
    self.assert_lint('int a;', '')
    self.assert_lint('int  a;', 'Extra space between int and a [whitespace/declaration] [3]')
    self.assert_lint('int*  a;', 'Extra space between int* and a [whitespace/declaration] [3]')
    self.assert_lint('else if { }', '')
    self.assert_lint('else  if { }', 'Extra space between else and if [whitespace/declaration] [3]')
def test_pointer_reference_marker_location(self):
    """In C++ files '*'/'&' bind to the type ('int* b'); in C files '*' binds
    to the variable ('int *b'). Dereference/address-of expressions are exempt."""
    self.assert_lint('int* b;', '', 'foo.cpp')
    self.assert_lint('int *b;',
                     'Declaration has space between type name and * in int *b [whitespace/declaration] [3]',
                     'foo.cpp')
    # '*' here is a dereference, not a declaration.
    self.assert_lint('return *b;', '', 'foo.cpp')
    self.assert_lint('delete *b;', '', 'foo.cpp')
    # Opposite convention for plain C files.
    self.assert_lint('int *b;', '', 'foo.c')
    self.assert_lint('int* b;',
                     'Declaration has space between * and variable name in int* b [whitespace/declaration] [3]',
                     'foo.c')
    self.assert_lint('int& b;', '', 'foo.cpp')
    self.assert_lint('int &b;',
                     'Declaration has space between type name and & in int &b [whitespace/declaration] [3]',
                     'foo.cpp')
    self.assert_lint('return &b;', '', 'foo.cpp')
def test_indent(self):
    """Indentation must be a multiple of 4 spaces; wrapped lines indent by
    exactly 4 (whitespace/indent). Leading spaces inside the string literals
    below are the data under test.
    """
    self.assert_lint('static int noindent;', '')
    self.assert_lint('    int fourSpaceIndent;', '')
    self.assert_lint(' int oneSpaceIndent;',
                     'Weird number of spaces at line-start. '
                     'Are you using a 4-space indent? [whitespace/indent] [3]')
    self.assert_lint('   int threeSpaceIndent;',
                     'Weird number of spaces at line-start. '
                     'Are you using a 4-space indent? [whitespace/indent] [3]')
    self.assert_lint(' char* oneSpaceIndent = "public:";',
                     'Weird number of spaces at line-start. '
                     'Are you using a 4-space indent? [whitespace/indent] [3]')
    self.assert_lint(' public:',
                     'Weird number of spaces at line-start. '
                     'Are you using a 4-space indent? [whitespace/indent] [3]')
    self.assert_lint('  public:',
                     'Weird number of spaces at line-start. '
                     'Are you using a 4-space indent? [whitespace/indent] [3]')
    self.assert_lint('   public:',
                     'Weird number of spaces at line-start. '
                     'Are you using a 4-space indent? [whitespace/indent] [3]')
    # Preprocessor conditionals inside an enum are indent-exempt.
    self.assert_multi_line_lint(
        'class Foo {\n'
        'public:\n'
        '    enum Bar {\n'
        '        Alpha,\n'
        '        Beta,\n'
        '#if ENABLED_BETZ\n'
        '        Charlie,\n'
        '#endif\n'
        '    };\n'
        '};',
        '')
    self.assert_multi_line_lint(
        'if (true) {\n'
        '    myFunction(reallyLongParam1, reallyLongParam2,\n'
        '             reallyLongParam3);\n'
        '}\n',
        'Weird number of spaces at line-start. Are you using a 4-space indent? [whitespace/indent] [3]')
    self.assert_multi_line_lint(
        'if (true) {\n'
        '    myFunction(reallyLongParam1, reallyLongParam2,\n'
        '        reallyLongParam3);\n'
        '}\n',
        'When wrapping a line, only indent 4 spaces. [whitespace/indent] [3]')
def test_not_alabel(self):
    """A qualified-name line ending in '::' must not be mistaken for a label."""
    self.assert_lint('MyVeryLongNamespace::MyVeryLongClassName::', '')
def test_tab(self):
    """Tab characters anywhere on a line are flagged (whitespace/tab)."""
    self.assert_lint('\tint a;',
                     'Tab found; better to use spaces [whitespace/tab] [1]')
    self.assert_lint('int a = 5;\t// set a to 5',
                     'Tab found; better to use spaces [whitespace/tab] [1]')
def test_unnamed_namespaces_in_headers(self):
    """Unnamed namespaces are forbidden in headers but fine in .cpp files;
    named namespaces and macro-continuation lines are always fine."""
    self.assert_language_rules_check(
        'foo.h', 'namespace {',
        'Do not use unnamed namespaces in header files. See'
        ' http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
        ' for more information. [build/namespaces] [4]')
    # namespace registration macros are OK.
    self.assert_language_rules_check('foo.h', 'namespace { \\', '')
    # named namespaces are OK.
    self.assert_language_rules_check('foo.h', 'namespace foo {', '')
    self.assert_language_rules_check('foo.h', 'namespace foonamespace {', '')
    self.assert_language_rules_check('foo.cpp', 'namespace {', '')
    self.assert_language_rules_check('foo.cpp', 'namespace foo {', '')
def test_build_class(self):
    # Test that the linter can parse to the end of class definitions,
    # and that it will report when it can't.
    # Use multi-line linter because it performs the ClassState check.
    self.assert_multi_line_lint(
        'class Foo {',
        'Failed to find complete declaration of class Foo'
        ' [build/class] [5]')
    # Don't warn on forward declarations of various types.
    self.assert_multi_line_lint(
        'class Foo;',
        '')
    self.assert_multi_line_lint(
        '''\
        struct Foo*
            foo = NewFoo();''',
        '')
    # Here is an example where the linter gets confused, even though
    # the code doesn't violate the style guide.
    self.assert_multi_line_lint(
        'class Foo\n'
        '#ifdef DERIVE_FROM_GOO\n'
        '    : public Goo {\n'
        '#else\n'
        '    : public Hoo {\n'
        '#endif\n'
        '};',
        'Failed to find complete declaration of class Foo'
        ' [build/class] [5]')
def test_build_end_comment(self):
    # The crosstool compiler we currently use will fail to compile the
    # code in this test, so we might consider removing the lint check.
    self.assert_lint('#endif Not a comment',
                     'Uncommented text after #endif is non-standard.'
                     ' Use a comment.'
                     ' [build/endif_comment] [5]')
def test_build_forward_decl(self):
    # The crosstool compiler we currently use will fail to compile the
    # code in this test, so we might consider removing the lint check.
    self.assert_lint('class Foo::Goo;',
                     'Inner-style forward declarations are invalid.'
                     ' Remove this line.'
                     ' [build/forward_decl] [5]')
def test_build_header_guard(self):
    """Exercises the build/header_guard checks: missing, wrong-style and
    mismatched #ifndef guards, the _flymake special case, and the WTF_ and
    Chromium-style guard prefixes.

    Fix: the guard-extraction regex was a plain string containing the
    spurious escapes \\#, \\, and \\: — invalid string escape sequences
    (DeprecationWarning, later SyntaxWarning). Since none of those
    characters are regex metacharacters, the pattern is rewritten as an
    equivalent raw string without the escapes; the compiled regex is
    byte-for-byte identical.
    """
    file_path = 'mydir/Foo.h'

    # We can't rely on our internal stuff to get a sane path on the open source
    # side of things, so just parse out the suggested header guard. This
    # doesn't allow us to test the suggested header guard, but it does let us
    # test all the other header tests.
    error_collector = ErrorCollector(self.assertTrue)
    self.process_file_data(file_path, 'h', [], error_collector)
    expected_guard = ''
    matcher = re.compile(
        r'No #ifndef header guard found, suggested CPP variable is: ([A-Za-z_0-9]+) ')
    for error in error_collector.result_list():
        matches = matcher.match(error)
        if matches:
            expected_guard = matches.group(1)
            break

    # Make sure we extracted something for our header guard.
    self.assertNotEqual(expected_guard, '')

    # Wrong guard
    error_collector = ErrorCollector(self.assertTrue)
    self.process_file_data(file_path, 'h',
                           ['#ifndef FOO_H', '#define FOO_H'], error_collector)
    self.assertEqual(
        1,
        error_collector.result_list().count(
            '#ifndef header guard has wrong style, please use: %s'
            ' [build/header_guard] [5]' % expected_guard),
        error_collector.result_list())

    # No define
    error_collector = ErrorCollector(self.assertTrue)
    self.process_file_data(file_path, 'h',
                           ['#ifndef %s' % expected_guard], error_collector)
    self.assertEqual(
        1,
        error_collector.result_list().count(
            'No #ifndef header guard found, suggested CPP variable is: %s'
            ' [build/header_guard] [5]' % expected_guard),
        error_collector.result_list())

    # Mismatched define
    error_collector = ErrorCollector(self.assertTrue)
    self.process_file_data(file_path, 'h',
                           ['#ifndef %s' % expected_guard,
                            '#define FOO_H'],
                           error_collector)
    self.assertEqual(
        1,
        error_collector.result_list().count(
            'No #ifndef header guard found, suggested CPP variable is: %s'
            ' [build/header_guard] [5]' % expected_guard),
        error_collector.result_list())

    # No header guard errors
    error_collector = ErrorCollector(self.assertTrue)
    self.process_file_data(file_path, 'h',
                           ['#ifndef %s' % expected_guard,
                            '#define %s' % expected_guard,
                            '#endif // %s' % expected_guard],
                           error_collector)
    for line in error_collector.result_list():
        if line.find('build/header_guard') != -1:
            self.fail('Unexpected error: %s' % line)

    # Completely incorrect header guard
    error_collector = ErrorCollector(self.assertTrue)
    self.process_file_data(file_path, 'h',
                           ['#ifndef FOO',
                            '#define FOO',
                            '#endif // FOO'],
                           error_collector)
    self.assertEqual(
        1,
        error_collector.result_list().count(
            '#ifndef header guard has wrong style, please use: %s'
            ' [build/header_guard] [5]' % expected_guard),
        error_collector.result_list())

    # Special case for flymake
    error_collector = ErrorCollector(self.assertTrue)
    self.process_file_data('mydir/Foo_flymake.h', 'h',
                           ['#ifndef %s' % expected_guard,
                            '#define %s' % expected_guard,
                            '#endif // %s' % expected_guard],
                           error_collector)
    for line in error_collector.result_list():
        if line.find('build/header_guard') != -1:
            self.fail('Unexpected error: %s' % line)

    error_collector = ErrorCollector(self.assertTrue)
    self.process_file_data('mydir/Foo_flymake.h', 'h', [], error_collector)
    self.assertEqual(
        1,
        error_collector.result_list().count(
            'No #ifndef header guard found, suggested CPP variable is: %s'
            ' [build/header_guard] [5]' % expected_guard),
        error_collector.result_list())

    # Verify that we don't blindly suggest the WTF prefix for all headers.
    self.assertFalse(expected_guard.startswith('WTF_'))

    # Allow the WTF_ prefix for files in that directory.
    header_guard_filter = FilterConfiguration(('-', '+build/header_guard'))
    error_collector = ErrorCollector(self.assertTrue, header_guard_filter)
    self.process_file_data('Source/JavaScriptCore/wtf/TestName.h', 'h',
                           ['#ifndef WTF_TestName_h', '#define WTF_TestName_h'],
                           error_collector)
    self.assertEqual(0, len(error_collector.result_list()),
                     error_collector.result_list())

    # Also allow the non WTF_ prefix for files in that directory.
    error_collector = ErrorCollector(self.assertTrue, header_guard_filter)
    self.process_file_data('Source/JavaScriptCore/wtf/TestName.h', 'h',
                           ['#ifndef TestName_h', '#define TestName_h'],
                           error_collector)
    self.assertEqual(0, len(error_collector.result_list()),
                     error_collector.result_list())

    # Verify that we suggest the WTF prefix version.
    error_collector = ErrorCollector(self.assertTrue, header_guard_filter)
    self.process_file_data('Source/JavaScriptCore/wtf/TestName.h', 'h',
                           ['#ifndef BAD_TestName_h', '#define BAD_TestName_h'],
                           error_collector)
    self.assertEqual(
        1,
        error_collector.result_list().count(
            '#ifndef header guard has wrong style, please use: WTF_TestName_h'
            ' [build/header_guard] [5]'),
        error_collector.result_list())

    # Verify that the Chromium-style header guard is allowed as well.
    error_collector = ErrorCollector(self.assertTrue, header_guard_filter)
    self.process_file_data('Source/foo/testname.h', 'h',
                           ['#ifndef BLINK_FOO_TESTNAME_H_',
                            '#define BLINK_FOO_TESTNAME_H_'],
                           error_collector)
    self.assertEqual(0, len(error_collector.result_list()),
                     error_collector.result_list())
def test_build_printf_format(self):
    """Backslash-escaped %, [, ( and { in printf-family format strings are
    undefined escapes and are flagged; a preceding '\\\\' suppresses the check."""
    self.assert_lint(
        r'printf("\%%d", value);',
        '%, [, (, and { are undefined character escapes. Unescape them.'
        ' [build/printf_format] [3]')
    self.assert_lint(
        r'snprintf(buffer, sizeof(buffer), "\[%d", value);',
        '%, [, (, and { are undefined character escapes. Unescape them.'
        ' [build/printf_format] [3]')
    self.assert_lint(
        r'fprintf(file, "\(%d", value);',
        '%, [, (, and { are undefined character escapes. Unescape them.'
        ' [build/printf_format] [3]')
    self.assert_lint(
        r'vsnprintf(buffer, sizeof(buffer), "\\\{%d", ap);',
        '%, [, (, and { are undefined character escapes. Unescape them.'
        ' [build/printf_format] [3]')
    # Don't warn if double-slash precedes the symbol
    self.assert_lint(r'printf("\\%%%d", value);',
                     '')
def test_runtime_printf_format(self):
    """%q (BSD quad) conversions and positional %N$ conversions in format
    strings are flagged (runtime/printf_format)."""
    self.assert_lint(
        r'fprintf(file, "%q", value);',
        '%q in format strings is deprecated. Use %ll instead.'
        ' [runtime/printf_format] [3]')
    self.assert_lint(
        r'aprintf(file, "The number is %12q", value);',
        '%q in format strings is deprecated. Use %ll instead.'
        ' [runtime/printf_format] [3]')
    self.assert_lint(
        r'printf(file, "The number is" "%-12q", value);',
        '%q in format strings is deprecated. Use %ll instead.'
        ' [runtime/printf_format] [3]')
    self.assert_lint(
        r'printf(file, "The number is" "%+12q", value);',
        '%q in format strings is deprecated. Use %ll instead.'
        ' [runtime/printf_format] [3]')
    self.assert_lint(
        r'printf(file, "The number is" "% 12q", value);',
        '%q in format strings is deprecated. Use %ll instead.'
        ' [runtime/printf_format] [3]')
    self.assert_lint(
        r'snprintf(file, "Never mix %d and %1$d parmaeters!", value);',
        '%N$ formats are unconventional. Try rewriting to avoid them.'
        ' [runtime/printf_format] [2]')
def assert_lintLogCodeOnError(self, code, expected_message):
    """Like assert_lint, but echoes the offending input code on failure."""
    actual = self.perform_single_line_lint(code, 'foo.cpp')
    if actual == expected_message:
        return
    self.fail('For code: "%s"\nGot: "%s"\nExpected: "%s"'
              % (code, actual, expected_message))
def test_build_storage_class(self):
    """Storage-class specifiers (static, extern, typedef, ...) must come first
    in a declaration (build/storage_class).

    Fix: the original used filter(lambda x: x is not None, ...) and then
    random.shuffle()'d the result — on Python 3 filter returns an iterator
    and shuffle raises TypeError. A list comprehension is identical on
    Python 2 and forward-compatible.
    """
    qualifiers = [None, 'const', 'volatile']
    signs = [None, 'signed', 'unsigned']
    types = ['void', 'char', 'int', 'float', 'double',
             'schar', 'int8', 'uint8', 'int16', 'uint16',
             'int32', 'uint32', 'int64', 'uint64']
    storage_classes = ['auto', 'extern', 'register', 'static', 'typedef']

    build_storage_class_error_message = (
        'Storage class (static, extern, typedef, etc) should be first.'
        ' [build/storage_class] [5]')

    # Some explicit cases. Legal in C++, deprecated in C99.
    self.assert_lint('const int static foo = 5;',
                     build_storage_class_error_message)
    self.assert_lint('char static foo;',
                     build_storage_class_error_message)
    self.assert_lint('double const static foo = 2.0;',
                     build_storage_class_error_message)
    self.assert_lint('uint64 typedef unsignedLongLong;',
                     build_storage_class_error_message)
    self.assert_lint('int register foo = 0;',
                     build_storage_class_error_message)

    # Since there are a very large number of possibilities, randomly
    # construct declarations.
    # Make sure that the declaration is logged if there's an error.
    # Seed generator with an integer for absolute reproducibility.
    random.seed(25)
    for unused_i in range(10):
        # Build up random list of non-storage-class declaration specs.
        other_decl_specs = [random.choice(qualifiers), random.choice(signs),
                            random.choice(types)]
        # remove None (list comprehension keeps it a real list on py2 and py3)
        other_decl_specs = [x for x in other_decl_specs if x is not None]
        # shuffle
        random.shuffle(other_decl_specs)

        # insert storage class after the first
        storage_class = random.choice(storage_classes)
        insertion_point = random.randint(1, len(other_decl_specs))
        decl_specs = (other_decl_specs[0:insertion_point]
                      + [storage_class]
                      + other_decl_specs[insertion_point:])

        self.assert_lintLogCodeOnError(
            ' '.join(decl_specs) + ';',
            build_storage_class_error_message)

        # but no error if storage class is first
        self.assert_lintLogCodeOnError(
            storage_class + ' ' + ' '.join(other_decl_specs),
            '')
def test_legal_copyright(self):
    """A copyright line must appear within the first 10 lines of a file
    (legal/copyright); the warning is emitted at most once."""
    legal_copyright_message = (
        'No copyright message found. '
        'You should have a line: "Copyright [year] <Copyright Owner>"'
        ' [legal/copyright] [5]')

    copyright_line = '// Copyright 2008 Google Inc. All Rights Reserved.'

    file_path = 'mydir/googleclient/foo.cpp'

    # There should be a copyright message in the first 10 lines
    error_collector = ErrorCollector(self.assertTrue)
    self.process_file_data(file_path, 'cpp', [], error_collector)
    self.assertEqual(
        1,
        error_collector.result_list().count(legal_copyright_message))

    # 10 blank lines push the copyright to line 11 — too late.
    error_collector = ErrorCollector(self.assertTrue)
    self.process_file_data(
        file_path, 'cpp',
        ['' for unused_i in range(10)] + [copyright_line],
        error_collector)
    self.assertEqual(
        1,
        error_collector.result_list().count(legal_copyright_message))

    # Test that warning isn't issued if Copyright line appears early enough.
    error_collector = ErrorCollector(self.assertTrue)
    self.process_file_data(file_path, 'cpp', [copyright_line], error_collector)
    for message in error_collector.result_list():
        if message.find('legal/copyright') != -1:
            self.fail('Unexpected error: %s' % message)

    # Line 10 is still within the allowed window.
    error_collector = ErrorCollector(self.assertTrue)
    self.process_file_data(
        file_path, 'cpp',
        ['' for unused_i in range(9)] + [copyright_line],
        error_collector)
    for message in error_collector.result_list():
        if message.find('legal/copyright') != -1:
            self.fail('Unexpected error: %s' % message)
def test_invalid_increment(self):
    """'*count++' increments the pointer, not the pointee — flagged."""
    self.assert_lint('*count++;',
                     'Changing pointer instead of value (or unused value of '
                     'operator*). [runtime/invalid_increment] [5]')
# Integral bitfields must be declared with either signed or unsigned keyword.
def test_plain_integral_bitfields(self):
    """Integral bitfields must carry an explicit signed/unsigned keyword;
    a ternary '?:' containing a ':' must not be mistaken for a bitfield."""
    errmsg = ('Please declare integral type bitfields with either signed or unsigned. [runtime/bitfields] [5]')

    self.assert_lint('int a : 30;', errmsg)
    self.assert_lint('mutable short a : 14;', errmsg)
    self.assert_lint('const char a : 6;', errmsg)
    self.assert_lint('long int a : 30;', errmsg)
    self.assert_lint('int a = 1 ? 0 : 30;', '')
# A mixture of unsigned and bool bitfields in a class will generate a warning.
def test_mixing_unsigned_bool_bitfields(self):
    """A class mixing unsigned and bool bitfields is flagged, because MSVC
    packs them into separate words (runtime/bitfields).

    Fix: the inner helper declared a mutable default argument
    (extra_warnings=[]); replaced with the None-sentinel idiom. The only
    use is a truthiness test, for which None and [] behave identically,
    so behavior is unchanged.
    """
    def errmsg(bool_bitfields, unsigned_bitfields, name):
        # Build the expected mixed-bitfield diagnostic for class `name`.
        bool_list = ', '.join(bool_bitfields)
        unsigned_list = ', '.join(unsigned_bitfields)
        return ('The class %s contains mixed unsigned and bool bitfields, '
                'which will pack into separate words on the MSVC compiler.\n'
                'Bool bitfields are [%s].\nUnsigned bitfields are [%s].\n'
                'Consider converting bool bitfields to unsigned. [runtime/bitfields] [5]'
                % (name, bool_list, unsigned_list))

    def build_test_case(bitfields, name, will_warn, extra_warnings=None):
        # Synthesize a class with the given (type, member, width) bitfields
        # and lint it, expecting the mixed-bitfield warning iff will_warn.
        bool_bitfields = []
        unsigned_bitfields = []
        test_string = 'class %s {\n' % (name,)
        line = 2
        for bitfield in bitfields:
            test_string += '    %s %s : %d;\n' % bitfield
            if bitfield[0] == 'bool':
                bool_bitfields.append('%d: %s' % (line, bitfield[1]))
            elif bitfield[0].startswith('unsigned'):
                unsigned_bitfields.append('%d: %s' % (line, bitfield[1]))
            line += 1
        test_string += '}\n'
        error = ''
        if will_warn:
            error = errmsg(bool_bitfields, unsigned_bitfields, name)
        if extra_warnings and error:
            error = extra_warnings + [error]
        self.assert_multi_line_lint(test_string, error)

    build_test_case([('bool', 'm_boolMember', 4), ('unsigned', 'm_unsignedMember', 3)],
                    'MyClass', True)
    build_test_case([('bool', 'm_boolMember', 4), ('bool', 'm_anotherBool', 3)],
                    'MyClass', False)
    build_test_case([('unsigned', 'm_unsignedMember', 4), ('unsigned', 'm_anotherUnsigned', 3)],
                    'MyClass', False)
    build_test_case([('bool', 'm_boolMember', 4), ('bool', 'm_anotherbool', 3),
                     ('bool', 'm_moreBool', 1), ('bool', 'm_lastBool', 1),
                     ('unsigned int', 'm_tokenUnsigned', 4)],
                    'MyClass', True, ['Omit int when using unsigned [runtime/unsigned] [1]'])

    # Non-bitfield members never participate in the check.
    self.assert_multi_line_lint('class NoProblemsHere {\n'
                                '    bool m_boolMember;\n'
                                '    unsigned m_unsignedMember;\n'
                                '    unsigned m_bitField1 : 1;\n'
                                '    unsigned m_bitField4 : 4;\n'
                                '}\n', '')
# Bitfields which are not declared unsigned or bool will generate a warning.
def test_unsigned_bool_bitfields(self):
    """Bitfields of any type other than 'unsigned'/'bool' must be flagged."""
    def check_bitfield(member, name, bit_type, bits, expect_warning):
        # Lint a one-member class and expect the declare-as-unsigned
        # warning iff expect_warning is set.
        if expect_warning:
            expected = ('Member %s of class %s defined as a bitfield of type %s. '
                        'Please declare all bitfields as unsigned. [runtime/bitfields] [4]'
                        % (member, name, bit_type))
        else:
            expected = ''
        self.assert_multi_line_lint('class %s {\n%s %s: %d;\n}\n'
                                    % (name, bit_type, member, bits),
                                    expected)

    check_bitfield('a', 'A', 'int32_t', 25, True)
    check_bitfield('m_someField', 'SomeClass', 'signed', 4, True)
    check_bitfield('m_someField', 'SomeClass', 'SomeEnum', 2, True)
    check_bitfield('a', 'A', 'unsigned', 22, False)
    check_bitfield('m_someField', 'SomeClass', 'bool', 1, False)
    check_bitfield('m_someField', 'SomeClass', 'unsigned', 2, False)
    # Declarations in 'Expected' or 'SameSizeAs' classes are OK.
    check_bitfield('m_bitfields', 'SomeClass', 'int32_t', 32, True)
    check_bitfield('m_bitfields', 'ExpectedSomeClass', 'int32_t', 32, False)
    check_bitfield('m_bitfields', 'SameSizeAsSomeClass', 'int32_t', 32, False)
class CleansedLinesTest(unittest.TestCase):
    """Unit tests for cpp_style.CleansedLines (comment and string stripping)."""

    def test_init(self):
        """Raw lines are kept verbatim; .lines has comments stripped and
        .elided additionally empties string-literal contents."""
        lines = ['Line 1',
                 'Line 2',
                 'Line 3 // Comment test',
                 'Line 4 "foo"']
        clean_lines = cpp_style.CleansedLines(lines)
        self.assertEqual(lines, clean_lines.raw_lines)
        self.assertEqual(4, clean_lines.num_lines())
        self.assertEqual(['Line 1',
                          'Line 2',
                          'Line 3 ',
                          'Line 4 "foo"'],
                         clean_lines.lines)
        self.assertEqual(['Line 1',
                          'Line 2',
                          'Line 3 ',
                          'Line 4 ""'],
                         clean_lines.elided)

    def test_init_empty(self):
        """Empty input produces an empty CleansedLines object."""
        clean_lines = cpp_style.CleansedLines([])
        self.assertEqual([], clean_lines.raw_lines)
        self.assertEqual(0, clean_lines.num_lines())

    def test_collapse_strings(self):
        """collapse_strings empties string/char literals, handling escapes."""
        collapse = cpp_style.CleansedLines.collapse_strings
        self.assertEqual('""', collapse('""'))  # "" (empty)
        self.assertEqual('"""', collapse('"""'))  # """ (bad)
        self.assertEqual('""', collapse('"xyz"'))  # "xyz" (string)
        self.assertEqual('""', collapse('"\\\""'))  # "\"" (string)
        self.assertEqual('""', collapse('"\'"'))  # "'" (string)
        self.assertEqual('"\"', collapse('"\"'))  # "\" (bad)
        self.assertEqual('""', collapse('"\\\\"'))  # "\\" (string)
        self.assertEqual('"', collapse('"\\\\\\"'))  # "\\\" (bad)
        self.assertEqual('""', collapse('"\\\\\\\\"'))  # "\\\\" (string)
        self.assertEqual('\'\'', collapse('\'\''))  # '' (empty)
        self.assertEqual('\'\'', collapse('\'a\''))  # 'a' (char)
        self.assertEqual('\'\'', collapse('\'\\\'\''))  # '\'' (char)
        self.assertEqual('\'', collapse('\'\\\''))  # '\' (bad)
        self.assertEqual('', collapse('\\012'))  # '\012' (char)
        self.assertEqual('', collapse('\\xfF0'))  # '\xfF0' (char)
        self.assertEqual('', collapse('\\n'))  # '\n' (char)
        self.assertEqual('\#', collapse('\\#'))  # '\#' (bad)
        self.assertEqual('StringReplace(body, "", "");',
                         collapse('StringReplace(body, "\\\\", "\\\\\\\\");'))
        self.assertEqual('\'\' ""',
                         collapse('\'"\' "foo"'))
# NOTE(review): this class is redefined by an identically named class later
# in this file; at import time that second definition rebinds the name, so
# the tests here never run under unittest discovery. The one test below is
# also duplicated verbatim in the later class. Consider removing this class
# or merging it into the later definition.
class OrderOfIncludesTest(CppStyleTestBase):
    def setUp(self):
        self.include_state = cpp_style._IncludeState()
        # Cheat os.path.abspath called in FileInfo class.
        self.os_path_abspath_orig = os.path.abspath
        os.path.abspath = lambda value: value

    def tearDown(self):
        os.path.abspath = self.os_path_abspath_orig

    def test_try_drop_common_suffixes(self):
        """_drop_common_suffixes strips -inl/_inl/extension suffixes from paths."""
        self.assertEqual('foo/foo', cpp_style._drop_common_suffixes('foo/foo-inl.h'))
        self.assertEqual('foo/bar/foo',
                         cpp_style._drop_common_suffixes('foo/bar/foo_inl.h'))
        self.assertEqual('foo/foo', cpp_style._drop_common_suffixes('foo/foo.cpp'))
        self.assertEqual('foo/foo_unusualinternal',
                         cpp_style._drop_common_suffixes('foo/foo_unusualinternal.h'))
        self.assertEqual('',
                         cpp_style._drop_common_suffixes('_test.cpp'))
        self.assertEqual('test',
                         cpp_style._drop_common_suffixes('test.cpp'))
class OrderOfIncludesTest(CppStyleTestBase):
    """Tests for #include classification and the required include ordering
    (config.h, primary header, blank line, then alphabetical)."""

    def setUp(self):
        self.include_state = cpp_style._IncludeState()
        # Cheat os.path.abspath called in FileInfo class.
        self.os_path_abspath_orig = os.path.abspath
        self.os_path_isfile_orig = os.path.isfile
        os.path.abspath = lambda value: value

    def tearDown(self):
        # Restore the os.path functions patched in setUp.
        os.path.abspath = self.os_path_abspath_orig
        os.path.isfile = self.os_path_isfile_orig

    def test_check_next_include_order__no_config(self):
        """Header files must not include WebCore config.h."""
        self.assertEqual('Header file should not contain WebCore config.h.',
                         self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, True, True))

    def test_check_next_include_order__no_self(self):
        """Header files must not include themselves."""
        self.assertEqual('Header file should not contain itself.',
                         self.include_state.check_next_include_order(cpp_style._PRIMARY_HEADER, True, True))
        # Test actual code to make sure that header types are correctly assigned.
        self.assert_language_rules_check('Foo.h',
                                         '#include "Foo.h"\n',
                                         'Header file should not contain itself. Should be: alphabetically sorted.'
                                         ' [build/include_order] [4]')
        self.assert_language_rules_check('FooBar.h',
                                         '#include "Foo.h"\n',
                                         '')

    def test_check_next_include_order__likely_then_config(self):
        """The primary header must come after config.h, not before."""
        self.assertEqual('Found header this file implements before WebCore config.h.',
                         self.include_state.check_next_include_order(cpp_style._PRIMARY_HEADER, False, True))
        self.assertEqual('Found WebCore config.h after a header this file implements.',
                         self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, False, True))

    def test_check_next_include_order__other_then_config(self):
        """Any other header before config.h is an ordering error."""
        self.assertEqual('Found other header before WebCore config.h.',
                         self.include_state.check_next_include_order(cpp_style._OTHER_HEADER, False, True))
        self.assertEqual('Found WebCore config.h after other header.',
                         self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, False, True))

    def test_check_next_include_order__config_then_other_then_likely(self):
        """The primary header must precede all other headers."""
        self.assertEqual('', self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, False, True))
        self.assertEqual('Found other header before a header this file implements.',
                         self.include_state.check_next_include_order(cpp_style._OTHER_HEADER, False, True))
        self.assertEqual('Found header this file implements after other header.',
                         self.include_state.check_next_include_order(cpp_style._PRIMARY_HEADER, False, True))

    def test_check_alphabetical_include_order(self):
        """Includes within a section must be alphabetically sorted; system
        headers (<...>) sort after project headers ("...")."""
        self.assert_language_rules_check('foo.h',
                                         '#include "a.h"\n'
                                         '#include "c.h"\n'
                                         '#include "b.h"\n',
                                         'Alphabetical sorting problem. [build/include_order] [4]')
        self.assert_language_rules_check('foo.h',
                                         '#include "a.h"\n'
                                         '#include "b.h"\n'
                                         '#include "c.h"\n',
                                         '')
        self.assert_language_rules_check('foo.h',
                                         '#include <assert.h>\n'
                                         '#include "bar.h"\n',
                                         'Alphabetical sorting problem. [build/include_order] [4]')
        self.assert_language_rules_check('foo.h',
                                         '#include "bar.h"\n'
                                         '#include <assert.h>\n',
                                         '')

    def test_check_alphabetical_include_order_errors_reported_for_both_lines(self):
        # If one of the two lines of out of order headers are filtered, the error should be
        # reported on the other line.
        self.assert_language_rules_check('foo.h',
                                         '#include "a.h"\n'
                                         '#include "c.h"\n'
                                         '#include "b.h"\n',
                                         'Alphabetical sorting problem. [build/include_order] [4]',
                                         lines_to_check=[2])
        self.assert_language_rules_check('foo.h',
                                         '#include "a.h"\n'
                                         '#include "c.h"\n'
                                         '#include "b.h"\n',
                                         'Alphabetical sorting problem. [build/include_order] [4]',
                                         lines_to_check=[3])
        # If no lines are filtered, the error should be reported only once.
        self.assert_language_rules_check('foo.h',
                                         '#include "a.h"\n'
                                         '#include "c.h"\n'
                                         '#include "b.h"\n',
                                         'Alphabetical sorting problem. [build/include_order] [4]')

    def test_check_line_break_after_own_header(self):
        """A blank line is required after the file's own header include."""
        self.assert_language_rules_check('foo.cpp',
                                         '#include "config.h"\n'
                                         '#include "foo.h"\n'
                                         '#include "bar.h"\n',
                                         'You should add a blank line after implementation file\'s own header. [build/include_order] [4]')
        self.assert_language_rules_check('foo.cpp',
                                         '#include "config.h"\n'
                                         '#include "foo.h"\n'
                                         '\n'
                                         '#include "bar.h"\n',
                                         '')

    def test_check_preprocessor_in_include_section(self):
        """Includes guarded by #ifdef blocks relax the ordering rules for the
        first include after the block, but sorting is still enforced inside
        and after preprocessor sections."""
        self.assert_language_rules_check('foo.cpp',
                                         '#include "config.h"\n'
                                         '#include "foo.h"\n'
                                         '\n'
                                         '#ifdef BAZ\n'
                                         '#include "baz.h"\n'
                                         '#else\n'
                                         '#include "foobar.h"\n'
                                         '#endif"\n'
                                         '#include "bar.h"\n',  # No flag because previous is in preprocessor section
                                         '')
        self.assert_language_rules_check('foo.cpp',
                                         '#include "config.h"\n'
                                         '#include "foo.h"\n'
                                         '\n'
                                         '#ifdef BAZ\n'
                                         '#include "baz.h"\n'
                                         '#endif"\n'
                                         '#include "bar.h"\n'
                                         '#include "a.h"\n',  # Should still flag this.
                                         'Alphabetical sorting problem. [build/include_order] [4]')
        self.assert_language_rules_check('foo.cpp',
                                         '#include "config.h"\n'
                                         '#include "foo.h"\n'
                                         '\n'
                                         '#ifdef BAZ\n'
                                         '#include "baz.h"\n'
                                         '#include "bar.h"\n'  # Should still flag this
                                         '#endif"\n',
                                         'Alphabetical sorting problem. [build/include_order] [4]')
        self.assert_language_rules_check('foo.cpp',
                                         '#include "config.h"\n'
                                         '#include "foo.h"\n'
                                         '\n'
                                         '#ifdef BAZ\n'
                                         '#include "baz.h"\n'
                                         '#endif"\n'
                                         '#ifdef FOOBAR\n'
                                         '#include "foobar.h"\n'
                                         '#endif"\n'
                                         '#include "bar.h"\n'
                                         '#include "a.h"\n',  # Should still flag this.
                                         'Alphabetical sorting problem. [build/include_order] [4]')
        # Check that after an already included error, the sorting rules still work.
        self.assert_language_rules_check('foo.cpp',
                                         '#include "config.h"\n'
                                         '#include "foo.h"\n'
                                         '\n'
                                         '#include "foo.h"\n'
                                         '#include "g.h"\n',
                                         '"foo.h" already included at foo.cpp:2 [build/include] [4]')

    def test_primary_header(self):
        # File with non-existing primary header should not produce errors.
        self.assert_language_rules_check('foo.cpp',
                                         '#include "config.h"\n'
                                         '\n'
                                         '#include "bar.h"\n',
                                         '')
        # Pretend that header files exist.
        os.path.isfile = lambda filename: True
        # Missing include for existing primary header -> error.
        self.assert_language_rules_check('foo.cpp',
                                         '#include "config.h"\n'
                                         '\n'
                                         '#include "bar.h"\n',
                                         'Found other header before a header this file implements. '
                                         'Should be: config.h, primary header, blank line, and then '
                                         'alphabetically sorted. [build/include_order] [4]')
        # Having include for existing primary header -> no error.
        self.assert_language_rules_check('foo.cpp',
                                         '#include "config.h"\n'
                                         '#include "foo.h"\n'
                                         '\n'
                                         '#include "bar.h"\n',
                                         '')
        os.path.isfile = self.os_path_isfile_orig

    def test_public_primary_header(self):
        # System header is not considered a primary header.
        self.assert_language_rules_check('foo.cpp',
                                         '#include "config.h"\n'
                                         '#include <other/foo.h>\n'
                                         '\n'
                                         '#include "a.h"\n',
                                         'Alphabetical sorting problem. [build/include_order] [4]')
        # ...except that it starts with public/.
        self.assert_language_rules_check('foo.cpp',
                                         '#include "config.h"\n'
                                         '#include <public/foo.h>\n'
                                         '\n'
                                         '#include "a.h"\n',
                                         '')
        # Even if it starts with public/ its base part must match with the source file name.
        self.assert_language_rules_check('foo.cpp',
                                         '#include "config.h"\n'
                                         '#include <public/foop.h>\n'
                                         '\n'
                                         '#include "a.h"\n',
                                         'Alphabetical sorting problem. [build/include_order] [4]')

    def test_check_wtf_includes(self):
        """wtf headers must be included with quotes, not angle brackets."""
        self.assert_language_rules_check('foo.cpp',
                                         '#include "config.h"\n'
                                         '#include "foo.h"\n'
                                         '\n'
                                         '#include <wtf/Assertions.h>\n',
                                         'wtf includes should be "wtf/file.h" instead of <wtf/file.h>.'
                                         ' [build/include] [4]')
        self.assert_language_rules_check('foo.cpp',
                                         '#include "config.h"\n'
                                         '#include "foo.h"\n'
                                         '\n'
                                         '#include "wtf/Assertions.h"\n',
                                         '')

    def test_check_cc_includes(self):
        """cc headers must be included without the cc/ directory prefix."""
        self.assert_language_rules_check('bar/chromium/foo.cpp',
                                         '#include "config.h"\n'
                                         '#include "foo.h"\n'
                                         '\n'
                                         '#include "cc/CCProxy.h"\n',
                                         'cc includes should be "CCFoo.h" instead of "cc/CCFoo.h".'
                                         ' [build/include] [4]')

    def test_classify_include(self):
        """_classify_include assigns config/primary/other/moc header types."""
        classify_include = cpp_style._classify_include
        include_state = cpp_style._IncludeState()
        self.assertEqual(cpp_style._CONFIG_HEADER,
                         classify_include('foo/foo.cpp',
                                          'config.h',
                                          False, include_state))
        self.assertEqual(cpp_style._PRIMARY_HEADER,
                         classify_include('foo/internal/foo.cpp',
                                          'foo/public/foo.h',
                                          False, include_state))
        self.assertEqual(cpp_style._PRIMARY_HEADER,
                         classify_include('foo/internal/foo.cpp',
                                          'foo/other/public/foo.h',
                                          False, include_state))
        self.assertEqual(cpp_style._OTHER_HEADER,
                         classify_include('foo/internal/foo.cpp',
                                          'foo/other/public/foop.h',
                                          False, include_state))
        self.assertEqual(cpp_style._OTHER_HEADER,
                         classify_include('foo/foo.cpp',
                                          'string',
                                          True, include_state))
        self.assertEqual(cpp_style._PRIMARY_HEADER,
                         classify_include('fooCustom.cpp',
                                          'foo.h',
                                          False, include_state))
        self.assertEqual(cpp_style._PRIMARY_HEADER,
                         classify_include('PrefixFooCustom.cpp',
                                          'Foo.h',
                                          False, include_state))
        self.assertEqual(cpp_style._MOC_HEADER,
                         classify_include('foo.cpp',
                                          'foo.moc',
                                          False, include_state))
        self.assertEqual(cpp_style._MOC_HEADER,
                         classify_include('foo.cpp',
                                          'moc_foo.cpp',
                                          False, include_state))
        # <public/foo.h> must be considered as primary even if is_system is True.
        self.assertEqual(cpp_style._PRIMARY_HEADER,
                         classify_include('foo/foo.cpp',
                                          'public/foo.h',
                                          True, include_state))
        self.assertEqual(cpp_style._OTHER_HEADER,
                         classify_include('foo.cpp',
                                          'foo.h',
                                          True, include_state))
        self.assertEqual(cpp_style._OTHER_HEADER,
                         classify_include('foo.cpp',
                                          'public/foop.h',
                                          True, include_state))
        # Qt private APIs use _p.h suffix.
        self.assertEqual(cpp_style._PRIMARY_HEADER,
                         classify_include('foo.cpp',
                                          'foo_p.h',
                                          False, include_state))
        # Tricky example where both includes might be classified as primary.
        self.assert_language_rules_check('ScrollbarThemeWince.cpp',
                                         '#include "config.h"\n'
                                         '#include "ScrollbarThemeWince.h"\n'
                                         '\n'
                                         '#include "Scrollbar.h"\n',
                                         '')
        self.assert_language_rules_check('ScrollbarThemeWince.cpp',
                                         '#include "config.h"\n'
                                         '#include "Scrollbar.h"\n'
                                         '\n'
                                         '#include "ScrollbarThemeWince.h"\n',
                                         'Found header this file implements after a header this file implements.'
                                         ' Should be: config.h, primary header, blank line, and then alphabetically sorted.'
                                         ' [build/include_order] [4]')
        self.assert_language_rules_check('ResourceHandleWin.cpp',
                                         '#include "config.h"\n'
                                         '#include "ResourceHandle.h"\n'
                                         '\n'
                                         '#include "ResourceHandleWin.h"\n',
                                         '')

    def test_try_drop_common_suffixes(self):
        """_drop_common_suffixes strips -inl/_inl/extension suffixes from paths."""
        self.assertEqual('foo/foo', cpp_style._drop_common_suffixes('foo/foo-inl.h'))
        self.assertEqual('foo/bar/foo',
                         cpp_style._drop_common_suffixes('foo/bar/foo_inl.h'))
        self.assertEqual('foo/foo', cpp_style._drop_common_suffixes('foo/foo.cpp'))
        self.assertEqual('foo/foo_unusualinternal',
                         cpp_style._drop_common_suffixes('foo/foo_unusualinternal.h'))
        self.assertEqual('',
                         cpp_style._drop_common_suffixes('_test.cpp'))
        self.assertEqual('test',
                         cpp_style._drop_common_suffixes('test.cpp'))
        # NOTE(review): the assertion below duplicates the one directly above;
        # it is harmless but could be removed.
        self.assertEqual('test',
                         cpp_style._drop_common_suffixes('test.cpp'))
class CheckForFunctionLengthsTest(CppStyleTestBase):
    """Tests for the readability/fn_size (long function body) checks.

    The trigger thresholds double for each confidence level; they are
    lowered in setUp so the generated test functions stay small.
    """

    def setUp(self):
        # Reducing these thresholds for the tests speeds up tests significantly.
        self.old_normal_trigger = cpp_style._FunctionState._NORMAL_TRIGGER
        self.old_test_trigger = cpp_style._FunctionState._TEST_TRIGGER
        cpp_style._FunctionState._NORMAL_TRIGGER = 10
        cpp_style._FunctionState._TEST_TRIGGER = 25

    def tearDown(self):
        # Restore the real thresholds patched in setUp.
        cpp_style._FunctionState._NORMAL_TRIGGER = self.old_normal_trigger
        cpp_style._FunctionState._TEST_TRIGGER = self.old_test_trigger

    # FIXME: Eliminate the need for this function.
    def set_min_confidence(self, min_confidence):
        """Set new test confidence and return old test confidence."""
        old_min_confidence = self.min_confidence
        self.min_confidence = min_confidence
        return old_min_confidence

    def assert_function_lengths_check(self, code, expected_message):
        """Check warnings for long function bodies are as expected.

        Args:
          code: C++ source code expected to generate a warning message.
          expected_message: Message expected to be generated by the C++ code.
        """
        self.assertEqual(expected_message,
                         self.perform_function_lengths_check(code))

    def trigger_lines(self, error_level):
        """Return number of lines needed to trigger a function length warning.

        Args:
          error_level: --v setting for cpp_style.

        Returns:
          Number of lines needed to trigger a function length warning.
        """
        return cpp_style._FunctionState._NORMAL_TRIGGER * 2 ** error_level

    def trigger_test_lines(self, error_level):
        """Return number of lines needed to trigger a test function length warning.

        Args:
          error_level: --v setting for cpp_style.

        Returns:
          Number of lines needed to trigger a test function length warning.
        """
        return cpp_style._FunctionState._TEST_TRIGGER * 2 ** error_level

    def assert_function_length_check_definition(self, lines, error_level):
        """Generate long function definition and check warnings are as expected.

        Args:
          lines: Number of lines to generate.
          error_level: --v setting for cpp_style.
        """
        trigger_level = self.trigger_lines(self.min_confidence)
        self.assert_function_lengths_check(
            'void test(int x)' + self.function_body(lines),
            ('Small and focused functions are preferred: '
             'test() has %d non-comment lines '
             '(error triggered by exceeding %d lines).'
             ' [readability/fn_size] [%d]'
             % (lines, trigger_level, error_level)))

    def assert_function_length_check_definition_ok(self, lines):
        """Generate shorter function definition and check no warning is produced.

        Args:
          lines: Number of lines to generate.
        """
        self.assert_function_lengths_check(
            'void test(int x)' + self.function_body(lines),
            '')

    def assert_function_length_check_at_error_level(self, error_level):
        """Generate and check function at the trigger level for --v setting.

        Args:
          error_level: --v setting for cpp_style.
        """
        self.assert_function_length_check_definition(self.trigger_lines(error_level),
                                                     error_level)

    def assert_function_length_check_below_error_level(self, error_level):
        """Generate and check function just below the trigger level for --v setting.

        Args:
          error_level: --v setting for cpp_style.
        """
        self.assert_function_length_check_definition(self.trigger_lines(error_level) - 1,
                                                     error_level - 1)

    def assert_function_length_check_above_error_level(self, error_level):
        """Generate and check function just above the trigger level for --v setting.

        Args:
          error_level: --v setting for cpp_style.
        """
        self.assert_function_length_check_definition(self.trigger_lines(error_level) + 1,
                                                     error_level)

    def function_body(self, number_of_lines):
        """Return a function body with the given number of statement lines."""
        return ' {\n' + '    this_is_just_a_test();\n' * number_of_lines + '}'

    def function_body_with_blank_lines(self, number_of_lines):
        """Return a function body with a blank line after each statement."""
        return ' {\n' + '    this_is_just_a_test();\n\n' * number_of_lines + '}'

    def function_body_with_no_lints(self, number_of_lines):
        """Return a function body whose statements all carry // NOLINT."""
        return ' {\n' + '    this_is_just_a_test();  // NOLINT\n' * number_of_lines + '}'

    # Test line length checks.
    def test_function_length_check_declaration(self):
        self.assert_function_lengths_check(
            'void test();',  # Not a function definition
            '')

    def test_function_length_check_declaration_with_block_following(self):
        self.assert_function_lengths_check(
            ('void test();\n'
             + self.function_body(66)),  # Not a function definition
            '')

    def test_function_length_check_class_definition(self):
        self.assert_function_lengths_check(  # Not a function definition
            'class Test' + self.function_body(66) + ';',
            '')

    def test_function_length_check_trivial(self):
        self.assert_function_lengths_check(
            'void test() {}',  # Not counted
            '')

    def test_function_length_check_empty(self):
        self.assert_function_lengths_check(
            'void test() {\n}',
            '')

    def test_function_length_check_definition_below_severity0(self):
        old_min_confidence = self.set_min_confidence(0)
        self.assert_function_length_check_definition_ok(self.trigger_lines(0) - 1)
        self.set_min_confidence(old_min_confidence)

    def test_function_length_check_definition_at_severity0(self):
        old_min_confidence = self.set_min_confidence(0)
        self.assert_function_length_check_definition_ok(self.trigger_lines(0))
        self.set_min_confidence(old_min_confidence)

    def test_function_length_check_definition_above_severity0(self):
        old_min_confidence = self.set_min_confidence(0)
        self.assert_function_length_check_above_error_level(0)
        self.set_min_confidence(old_min_confidence)

    def test_function_length_check_definition_below_severity1v0(self):
        old_min_confidence = self.set_min_confidence(0)
        self.assert_function_length_check_below_error_level(1)
        self.set_min_confidence(old_min_confidence)

    def test_function_length_check_definition_at_severity1v0(self):
        old_min_confidence = self.set_min_confidence(0)
        self.assert_function_length_check_at_error_level(1)
        self.set_min_confidence(old_min_confidence)

    def test_function_length_check_definition_below_severity1(self):
        self.assert_function_length_check_definition_ok(self.trigger_lines(1) - 1)

    def test_function_length_check_definition_at_severity1(self):
        self.assert_function_length_check_definition_ok(self.trigger_lines(1))

    def test_function_length_check_definition_above_severity1(self):
        self.assert_function_length_check_above_error_level(1)

    def test_function_length_check_definition_severity1_plus_indented(self):
        # Indented functions (e.g. inside a namespace) are still measured.
        error_level = 1
        error_lines = self.trigger_lines(error_level) + 1
        trigger_level = self.trigger_lines(self.min_confidence)
        indent_spaces = '    '
        self.assert_function_lengths_check(
            re.sub(r'(?m)^(.)', indent_spaces + r'\1',
                   'void test_indent(int x)\n' + self.function_body(error_lines)),
            ('Small and focused functions are preferred: '
             'test_indent() has %d non-comment lines '
             '(error triggered by exceeding %d lines).'
             ' [readability/fn_size] [%d]')
            % (error_lines, trigger_level, error_level))

    def test_function_length_check_definition_severity1_plus_blanks(self):
        error_level = 1
        error_lines = self.trigger_lines(error_level) + 1
        trigger_level = self.trigger_lines(self.min_confidence)
        self.assert_function_lengths_check(
            'void test_blanks(int x)' + self.function_body(error_lines),
            ('Small and focused functions are preferred: '
             'test_blanks() has %d non-comment lines '
             '(error triggered by exceeding %d lines).'
             ' [readability/fn_size] [%d]')
            % (error_lines, trigger_level, error_level))

    def test_function_length_check_complex_definition_severity1(self):
        # Templated/qualified signatures must still be parsed and reported.
        error_level = 1
        error_lines = self.trigger_lines(error_level) + 1
        trigger_level = self.trigger_lines(self.min_confidence)
        self.assert_function_lengths_check(
            ('my_namespace::my_other_namespace::MyVeryLongTypeName<Type1, bool func(const Element*)>*\n'
             'my_namespace::my_other_namespace<Type3, Type4>::~MyFunction<Type5<Type6, Type7> >(int arg1, char* arg2)'
             + self.function_body(error_lines)),
            ('Small and focused functions are preferred: '
             'my_namespace::my_other_namespace<Type3, Type4>::~MyFunction<Type5<Type6, Type7> >()'
             ' has %d non-comment lines '
             '(error triggered by exceeding %d lines).'
             ' [readability/fn_size] [%d]')
            % (error_lines, trigger_level, error_level))

    def test_function_length_check_definition_severity1_for_test(self):
        # TEST_F bodies use the (larger) test trigger threshold.
        error_level = 1
        error_lines = self.trigger_test_lines(error_level) + 1
        trigger_level = self.trigger_test_lines(self.min_confidence)
        self.assert_function_lengths_check(
            'TEST_F(Test, Mutator)' + self.function_body(error_lines),
            ('Small and focused functions are preferred: '
             'TEST_F(Test, Mutator) has %d non-comment lines '
             '(error triggered by exceeding %d lines).'
             ' [readability/fn_size] [%d]')
            % (error_lines, trigger_level, error_level))

    def test_function_length_check_definition_severity1_for_split_line_test(self):
        error_level = 1
        error_lines = self.trigger_test_lines(error_level) + 1
        trigger_level = self.trigger_test_lines(self.min_confidence)
        self.assert_function_lengths_check(
            ('TEST_F(GoogleUpdateRecoveryRegistryProtectedTest,\n'
             '    FixGoogleUpdate_AllValues_MachineApp)'  # note: 4 spaces
             + self.function_body(error_lines)),
            ('Small and focused functions are preferred: '
             'TEST_F(GoogleUpdateRecoveryRegistryProtectedTest, '  # 1 space
             'FixGoogleUpdate_AllValues_MachineApp) has %d non-comment lines '
             '(error triggered by exceeding %d lines).'
             ' [readability/fn_size] [%d]')
            % (error_lines, trigger_level, error_level))

    def test_function_length_check_definition_severity1_for_bad_test_doesnt_break(self):
        error_level = 1
        error_lines = self.trigger_test_lines(error_level) + 1
        trigger_level = self.trigger_test_lines(self.min_confidence)
        # Since the function name isn't valid, the function detection algorithm
        # will skip it, so no error is produced.
        self.assert_function_lengths_check(
            ('TEST_F('
             + self.function_body(error_lines)),
            '')

    def test_function_length_check_definition_severity1_with_embedded_no_lints(self):
        # Per-line NOLINT does not exempt the function-size check.
        error_level = 1
        error_lines = self.trigger_lines(error_level) + 1
        trigger_level = self.trigger_lines(self.min_confidence)
        self.assert_function_lengths_check(
            'void test(int x)' + self.function_body_with_no_lints(error_lines),
            ('Small and focused functions are preferred: '
             'test() has %d non-comment lines '
             '(error triggered by exceeding %d lines).'
             ' [readability/fn_size] [%d]')
            % (error_lines, trigger_level, error_level))

    def test_function_length_check_definition_severity1_with_no_lint(self):
        # A NOLINT on the closing line suppresses the warning.
        self.assert_function_lengths_check(
            ('void test(int x)' + self.function_body(self.trigger_lines(1))
             + '  // NOLINT -- long function'),
            '')

    def test_function_length_check_definition_below_severity2(self):
        self.assert_function_length_check_below_error_level(2)

    def test_function_length_check_definition_severity2(self):
        self.assert_function_length_check_at_error_level(2)

    def test_function_length_check_definition_above_severity2(self):
        self.assert_function_length_check_above_error_level(2)

    def test_function_length_check_definition_below_severity3(self):
        self.assert_function_length_check_below_error_level(3)

    def test_function_length_check_definition_severity3(self):
        self.assert_function_length_check_at_error_level(3)

    def test_function_length_check_definition_above_severity3(self):
        self.assert_function_length_check_above_error_level(3)

    def test_function_length_check_definition_below_severity4(self):
        self.assert_function_length_check_below_error_level(4)

    def test_function_length_check_definition_severity4(self):
        self.assert_function_length_check_at_error_level(4)

    def test_function_length_check_definition_above_severity4(self):
        self.assert_function_length_check_above_error_level(4)

    def test_function_length_check_definition_below_severity5(self):
        self.assert_function_length_check_below_error_level(5)

    def test_function_length_check_definition_at_severity5(self):
        self.assert_function_length_check_at_error_level(5)

    def test_function_length_check_definition_above_severity5(self):
        self.assert_function_length_check_above_error_level(5)

    def test_function_length_check_definition_huge_lines(self):
        # 5 is the limit
        self.assert_function_length_check_definition(self.trigger_lines(6), 5)

    def test_function_length_not_determinable(self):
        # Macro invocation without terminating semicolon.
        self.assert_function_lengths_check(
            'MACRO(arg)',
            '')
        # Macro with underscores
        self.assert_function_lengths_check(
            'MACRO_WITH_UNDERSCORES(arg1, arg2, arg3)',
            '')
        self.assert_function_lengths_check(
            'NonMacro(arg)',
            'Lint failed to find start of function body.'
            ' [readability/fn_size] [5]')
class NoNonVirtualDestructorsTest(CppStyleTestBase):
    """Tests for the runtime/virtual check (classes with virtual methods need
    a virtual destructor) plus the readability/enum_casing check.

    NOTE(review): the multi-line fixture indentation below was reconstructed
    from a whitespace-mangled source — confirm against the original file.
    """

    def test_no_error(self):
        self.assert_multi_line_lint(
            '''\
            class Foo {
                virtual ~Foo();
                virtual void foo();
            };''',
            '')

        self.assert_multi_line_lint(
            '''\
            class Foo {
                virtual inline ~Foo();
                virtual void foo();
            };''',
            '')

        self.assert_multi_line_lint(
            '''\
            class Foo {
                inline virtual ~Foo();
                virtual void foo();
            };''',
            '')

        self.assert_multi_line_lint(
            '''\
            class Foo::Goo {
                virtual ~Goo();
                virtual void goo();
            };''',
            '')
        self.assert_multi_line_lint(
            'class Foo { void foo(); };',
            'More than one command on the same line  [whitespace/newline] [4]')

        self.assert_multi_line_lint(
            'class MyClass {\n'
            '    int getIntValue() { ASSERT(m_ptr); return *m_ptr; }\n'
            '};\n',
            '')
        self.assert_multi_line_lint(
            'class MyClass {\n'
            '    int getIntValue()\n'
            '    {\n'
            '        ASSERT(m_ptr); return *m_ptr;\n'
            '    }\n'
            '};\n',
            'More than one command on the same line  [whitespace/newline] [4]')

        self.assert_multi_line_lint(
            '''\
            class Qualified::Goo : public Foo {
                virtual void goo();
            };''',
            '')

    def test_no_destructor_when_virtual_needed(self):
        self.assert_multi_line_lint_re(
            '''\
            class Foo {
                virtual void foo();
            };''',
            'The class Foo probably needs a virtual destructor')

    def test_enum_casing(self):
        """Enum members must use InterCaps with an initial capital letter."""
        self.assert_multi_line_lint(
            '''\
            enum Foo {
                FOO_ONE = 1,
                FOO_TWO
            };
            enum { FOO_ONE };
            enum {FooOne, fooTwo};
            enum {
                FOO_ONE
            };''',
            ['enum members should use InterCaps with an initial capital letter.  [readability/enum_casing] [4]'] * 5)

        self.assert_multi_line_lint(
            '''\
            enum Foo {
                fooOne = 1,
                FooTwo = 2
            };''',
            'enum members should use InterCaps with an initial capital letter.  [readability/enum_casing] [4]')

        self.assert_multi_line_lint(
            '''\
            enum Foo {
                FooOne = 1,
                FooTwo,
                kFooConst,
            } fooVar = FooOne;
            enum { FooOne, FooTwo };
            enum { FooOne, FooTwo } fooVar = FooTwo;
            enum { FooOne= FooTwo } foo;
            enum Enum123 {
                FooOne,
                FooTwo = FooOne,
            };''',
            '')

        # WebIDL-generated enums are exempt.
        self.assert_multi_line_lint(
            '''\
            // WebIDL enum
            enum Foo {
                FOO_ONE = 1,
                FOO_TWO = 2,
            };''',
            '')

        self.assert_multi_line_lint(
            '''\
            // WebKitIDL enum
            enum Foo { FOO_ONE, FOO_TWO };''',
            '')

    def test_destructor_non_virtual_when_virtual_needed(self):
        self.assert_multi_line_lint_re(
            '''\
            class Foo {
                ~Foo();
                virtual void foo();
            };''',
            'The class Foo probably needs a virtual destructor')

    def test_no_warn_when_derived(self):
        # Derived classes may rely on the base providing a virtual destructor.
        self.assert_multi_line_lint(
            '''\
            class Foo : public Goo {
                virtual void foo();
            };''',
            '')

    def test_internal_braces(self):
        # Braces nested inside the class (e.g. an enum) must not confuse the check.
        self.assert_multi_line_lint_re(
            '''\
            class Foo {
                enum Goo {
                    Goo
                };
                virtual void foo();
            };''',
            'The class Foo probably needs a virtual destructor')

    def test_inner_class_needs_virtual_destructor(self):
        self.assert_multi_line_lint_re(
            '''\
            class Foo {
                class Goo {
                    virtual void goo();
                };
            };''',
            'The class Goo probably needs a virtual destructor')

    def test_outer_class_needs_virtual_destructor(self):
        self.assert_multi_line_lint_re(
            '''\
            class Foo {
                class Goo {
                };
                virtual void foo();
            };''',
            'The class Foo probably needs a virtual destructor')

    def test_qualified_class_needs_virtual_destructor(self):
        self.assert_multi_line_lint_re(
            '''\
            class Qualified::Foo {
                virtual void foo();
            };''',
            'The class Qualified::Foo probably needs a virtual destructor')

    def test_multi_line_declaration_no_error(self):
        self.assert_multi_line_lint_re(
            '''\
            class Foo
                : public Goo {
                virtual void foo();
            };''',
            '')

    def test_multi_line_declaration_with_error(self):
        self.assert_multi_line_lint(
            '''\
            class Foo
            {
                virtual void foo();
            };''',
            ['This { should be at the end of the previous line  '
             '[whitespace/braces] [4]',
             'The class Foo probably needs a virtual destructor due to having '
             'virtual method(s), one declared at line 3.  [runtime/virtual] [4]'])
class PassPtrTest(CppStyleTestBase):
# For http://webkit.org/coding/RefPtr.html
def assert_pass_ptr_check(self, code, expected_message):
"""Check warnings for Pass*Ptr are as expected.
Args:
code: C++ source code expected to generate a warning message.
expected_message: Message expected to be generated by the C++ code.
"""
self.assertEqual(expected_message,
self.perform_pass_ptr_check(code))
def test_pass_ref_ptr_in_function(self):
self.assert_pass_ptr_check(
'int myFunction()\n'
'{\n'
' PassRefPtr<Type1> variable = variable2;\n'
'}',
'Local variables should never be PassRefPtr (see '
'http://webkit.org/coding/RefPtr.html). [readability/pass_ptr] [5]')
def test_pass_own_ptr_in_function(self):
self.assert_pass_ptr_check(
'int myFunction()\n'
'{\n'
' PassOwnPtr<Type1> variable = variable2;\n'
'}',
'Local variables should never be PassOwnPtr (see '
'http://webkit.org/coding/RefPtr.html). [readability/pass_ptr] [5]')
def test_pass_other_type_ptr_in_function(self):
self.assert_pass_ptr_check(
'int myFunction()\n'
'{\n'
' PassOtherTypePtr<Type1> variable;\n'
'}',
'Local variables should never be PassOtherTypePtr (see '
'http://webkit.org/coding/RefPtr.html). [readability/pass_ptr] [5]')
def test_pass_ref_ptr_return_value(self):
self.assert_pass_ptr_check(
'PassRefPtr<Type1>\n'
'myFunction(int)\n'
'{\n'
'}',
'')
self.assert_pass_ptr_check(
'PassRefPtr<Type1> myFunction(int)\n'
'{\n'
'}',
'')
self.assert_pass_ptr_check(
'PassRefPtr<Type1> myFunction();\n',
'')
self.assert_pass_ptr_check(
'OwnRefPtr<Type1> myFunction();\n',
'')
self.assert_pass_ptr_check(
'RefPtr<Type1> myFunction(int)\n'
'{\n'
'}',
'The return type should use PassRefPtr instead of RefPtr. [readability/pass_ptr] [5]')
self.assert_pass_ptr_check(
'OwnPtr<Type1> myFunction(int)\n'
'{\n'
'}',
'The return type should use PassOwnPtr instead of OwnPtr. [readability/pass_ptr] [5]')
self.assert_pass_ptr_check(
'RefPtrWillBeRawPtr<Type1> myFunction(int)\n'
'{\n'
'}',
'The return type should use PassRefPtrWillBeRawPtr instead of RefPtrWillBeRawPtr. [readability/pass_ptr] [5]')
self.assert_pass_ptr_check(
'OwnPtrWillBeRawPtr<Type1> myFunction(int)\n'
'{\n'
'}',
'The return type should use PassOwnPtrWillBeRawPtr instead of OwnPtrWillBeRawPtr. [readability/pass_ptr] [5]')
def test_ref_ptr_parameter_value(self):
self.assert_pass_ptr_check(
'int myFunction(PassRefPtr<Type1>)\n'
'{\n'
'}',
'')
self.assert_pass_ptr_check(
'int myFunction(RefPtr<Type1>)\n'
'{\n'
'}',
'The parameter type should use PassRefPtr instead of RefPtr. [readability/pass_ptr] [5]')
self.assert_pass_ptr_check(
'int myFunction(RefPtr<Type1>&)\n'
'{\n'
'}',
'')
self.assert_pass_ptr_check(
'int myFunction(RefPtr<Type1>*)\n'
'{\n'
'}',
'')
self.assert_pass_ptr_check(
'int myFunction(RefPtr<Type1>* = 0)\n'
'{\n'
'}',
'')
self.assert_pass_ptr_check(
'int myFunction(RefPtr<Type1>* = 0)\n'
'{\n'
'}',
'')
def test_own_ptr_parameter_value(self):
self.assert_pass_ptr_check(
'int myFunction(PassOwnPtr<Type1>)\n'
'{\n'
'}',
'')
self.assert_pass_ptr_check(
'int myFunction(OwnPtr<Type1>)\n'
'{\n'
'}',
'The parameter type should use PassOwnPtr instead of OwnPtr. [readability/pass_ptr] [5]')
self.assert_pass_ptr_check(
'int myFunction(OwnPtr<Type1>& simple)\n'
'{\n'
'}',
'')
    def test_ref_ptr_member_variable(self):
        # Member variables may be plain RefPtr; only locals, parameters
        # and return types are restricted by the pass_ptr check.
        # NOTE(review): the first fragment has no trailing '\n', so the
        # checked snippet is the single line
        # 'class Foo {    RefPtr<Type1> m_other;' followed by '};' —
        # presumably intentional (the check is line-based), but confirm.
        self.assert_pass_ptr_check(
            'class Foo {'
            '    RefPtr<Type1> m_other;\n'
            '};\n',
            '')
class LeakyPatternTest(CppStyleTestBase):
    """Tests for the runtime/leaky_pattern checks (Win32 HDC handling)."""

    def assert_leaky_pattern_check(self, code, expected_message):
        """Check warnings for leaky patterns are as expected.

        Args:
          code: C++ source code expected to generate a warning message.
          expected_message: Message expected to be generated by the C++ code.
        """
        self.assertEqual(expected_message,
                         self.perform_leaky_pattern_check(code))

    def test_get_dc(self):
        # Raw GetDC() leaks the HDC unless released; HWndDC wraps it.
        self.assert_leaky_pattern_check(
            'HDC hdc = GetDC(hwnd);',
            'Use the class HWndDC instead of calling GetDC to avoid potential '
            'memory leaks. [runtime/leaky_pattern] [5]')

    def test_get_dc_ex(self):
        # Renamed from a second 'test_get_dc' definition, which silently
        # shadowed the method above so the GetDC case never actually ran.
        self.assert_leaky_pattern_check(
            'HDC hdc = GetDCEx(hwnd, 0, 0);',
            'Use the class HWndDC instead of calling GetDCEx to avoid potential '
            'memory leaks. [runtime/leaky_pattern] [5]')

    def test_own_get_dc(self):
        # HWndDC is the sanctioned RAII-style wrapper and is not flagged.
        self.assert_leaky_pattern_check(
            'HWndDC hdc(hwnd);',
            '')

    def test_create_dc(self):
        self.assert_leaky_pattern_check(
            'HDC dc2 = ::CreateDC();',
            'Use adoptPtr and OwnPtr<HDC> when calling CreateDC to avoid potential '
            'memory leaks. [runtime/leaky_pattern] [5]')
        self.assert_leaky_pattern_check(
            'adoptPtr(CreateDC());',
            '')

    def test_create_compatible_dc(self):
        self.assert_leaky_pattern_check(
            'HDC dc2 = CreateCompatibleDC(dc);',
            'Use adoptPtr and OwnPtr<HDC> when calling CreateCompatibleDC to avoid potential '
            'memory leaks. [runtime/leaky_pattern] [5]')
        self.assert_leaky_pattern_check(
            'adoptPtr(CreateCompatibleDC(dc));',
            '')
class WebKitStyleTest(CppStyleTestBase):
# for http://webkit.org/coding/coding-style.html
    def test_indentation(self):
        """Indentation rules from webkit.org/coding/coding-style.html:
        spaces over tabs, 4-space indent, no indentation inside
        namespaces, switch/case label alignment, and multi-line boolean
        operator placement."""
        # 1. Use spaces, not tabs. Tabs should only appear in files that
        # require them for semantic meaning, like Makefiles.
        self.assert_multi_line_lint(
            'class Foo {\n'
            '    int goo;\n'
            '};',
            '')
        self.assert_multi_line_lint(
            'class Foo {\n'
            '\tint goo;\n'
            '};',
            'Tab found; better to use spaces [whitespace/tab] [1]')
        # 2. The indent size is 4 spaces.
        self.assert_multi_line_lint(
            'class Foo {\n'
            '    int goo;\n'
            '};',
            '')
        self.assert_multi_line_lint(
            'class Foo {\n'
            '   int goo;\n'
            '};',
            'Weird number of spaces at line-start. Are you using a 4-space indent? [whitespace/indent] [3]')
        # 3. In a header, code inside a namespace should not be indented.
        self.assert_multi_line_lint(
            'namespace WebCore {\n\n'
            'class Document {\n'
            '    int myVariable;\n'
            '};\n'
            '}',
            '',
            'foo.h')
        self.assert_multi_line_lint(
            'namespace OuterNamespace {\n'
            '    namespace InnerNamespace {\n'
            '    class Document {\n'
            '};\n'
            '};\n'
            '}',
            'Code inside a namespace should not be indented. [whitespace/indent] [4]',
            'foo.h')
        self.assert_multi_line_lint(
            'namespace OuterNamespace {\n'
            '    class Document {\n'
            '    namespace InnerNamespace {\n'
            '};\n'
            '};\n'
            '}',
            'Code inside a namespace should not be indented. [whitespace/indent] [4]',
            'foo.h')
        # Indented code under a preprocessor conditional is still flagged.
        self.assert_multi_line_lint(
            'namespace WebCore {\n'
            '#if 0\n'
            '    class Document {\n'
            '};\n'
            '#endif\n'
            '}',
            'Code inside a namespace should not be indented. [whitespace/indent] [4]',
            'foo.h')
        self.assert_multi_line_lint(
            'namespace WebCore {\n'
            'class Document {\n'
            '};\n'
            '}',
            '',
            'foo.h')
        # 4. In an implementation file (files with the extension .cpp, .c
        # or .mm), code inside a namespace should not be indented.
        self.assert_multi_line_lint(
            'namespace WebCore {\n\n'
            'Document::Foo()\n'
            '    : foo(bar)\n'
            '    , boo(far)\n'
            '{\n'
            '    stuff();\n'
            '}',
            '',
            'foo.cpp')
        self.assert_multi_line_lint(
            'namespace OuterNamespace {\n'
            'namespace InnerNamespace {\n'
            'Document::Foo() { }\n'
            '    void* p;\n'
            '}\n'
            '}\n',
            'Code inside a namespace should not be indented. [whitespace/indent] [4]',
            'foo.cpp')
        self.assert_multi_line_lint(
            'namespace OuterNamespace {\n'
            'namespace InnerNamespace {\n'
            'Document::Foo() { }\n'
            '}\n'
            '    void* p;\n'
            '}\n',
            'Code inside a namespace should not be indented. [whitespace/indent] [4]',
            'foo.cpp')
        self.assert_multi_line_lint(
            'namespace WebCore {\n\n'
            '    const char* foo = "start:;"\n'
            '        "dfsfsfs";\n'
            '}\n',
            'Code inside a namespace should not be indented. [whitespace/indent] [4]',
            'foo.cpp')
        # String literals and comments containing ';' or braces must not
        # confuse the namespace-indent detection.
        self.assert_multi_line_lint(
            'namespace WebCore {\n\n'
            'const char* foo(void* a = ";", // ;\n'
            '    void* b);\n'
            '    void* p;\n'
            '}\n',
            'Code inside a namespace should not be indented. [whitespace/indent] [4]',
            'foo.cpp')
        self.assert_multi_line_lint(
            'namespace WebCore {\n\n'
            'const char* foo[] = {\n'
            '    "void* b);", // ;\n'
            '    "asfdf",\n'
            '    }\n'
            '    void* p;\n'
            '}\n',
            'Code inside a namespace should not be indented. [whitespace/indent] [4]',
            'foo.cpp')
        self.assert_multi_line_lint(
            'namespace WebCore {\n\n'
            'const char* foo[] = {\n'
            '    "void* b);", // }\n'
            '    "asfdf",\n'
            '    }\n'
            '}\n',
            '',
            'foo.cpp')
        # The namespace declaration itself must not be indented either.
        self.assert_multi_line_lint(
            '    namespace WebCore {\n\n'
            '    void Document::Foo()\n'
            '    {\n'
            'start: // infinite loops are fun!\n'
            '        goto start;\n'
            '    }',
            'namespace should never be indented. [whitespace/indent] [4]',
            'foo.cpp')
        self.assert_multi_line_lint(
            'namespace WebCore {\n'
            '    Document::Foo() { }\n'
            '}',
            'Code inside a namespace should not be indented.'
            ' [whitespace/indent] [4]',
            'foo.cpp')
        # Macro continuation lines are exempt from the indent check.
        self.assert_multi_line_lint(
            'namespace WebCore {\n'
            '#define abc(x) x; \\\n'
            '    x\n'
            '}',
            '',
            'foo.cpp')
        self.assert_multi_line_lint(
            'namespace WebCore {\n'
            '#define abc(x) x; \\\n'
            '    x\n'
            '    void* x;'
            '}',
            'Code inside a namespace should not be indented. [whitespace/indent] [4]',
            'foo.cpp')
        # 5. A case label should line up with its switch statement. The
        # case statement is indented.
        self.assert_multi_line_lint(
            '    switch (condition) {\n'
            '    case fooCondition:\n'
            '    case barCondition:\n'
            '        i++;\n'
            '        break;\n'
            '    default:\n'
            '        i--;\n'
            '    }\n',
            '')
        self.assert_multi_line_lint(
            '    switch (condition) {\n'
            '    case fooCondition:\n'
            '        switch (otherCondition) {\n'
            '        default:\n'
            '            return;\n'
            '        }\n'
            '    default:\n'
            '        i--;\n'
            '    }\n',
            '')
        self.assert_multi_line_lint(
            '    switch (condition) {\n'
            '    case fooCondition: break;\n'
            '    default: return;\n'
            '    }\n',
            '')
        self.assert_multi_line_lint(
            '    switch (condition) {\n'
            '        case fooCondition:\n'
            '        case barCondition:\n'
            '            i++;\n'
            '            break;\n'
            '        default:\n'
            '            i--;\n'
            '    }\n',
            'A case label should not be indented, but line up with its switch statement.'
            ' [whitespace/indent] [4]')
        self.assert_multi_line_lint(
            '    switch (condition) {\n'
            '        case fooCondition:\n'
            '            break;\n'
            '    default:\n'
            '        i--;\n'
            '    }\n',
            'A case label should not be indented, but line up with its switch statement.'
            ' [whitespace/indent] [4]')
        self.assert_multi_line_lint(
            '    switch (condition) {\n'
            '    case fooCondition:\n'
            '    case barCondition:\n'
            '        switch (otherCondition) {\n'
            '            default:\n'
            '            return;\n'
            '        }\n'
            '    default:\n'
            '        i--;\n'
            '    }\n',
            'A case label should not be indented, but line up with its switch statement.'
            ' [whitespace/indent] [4]')
        self.assert_multi_line_lint(
            '    switch (condition) {\n'
            '    case fooCondition:\n'
            '    case barCondition:\n'
            '    i++;\n'
            '    break;\n\n'
            '    default:\n'
            '    i--;\n'
            '    }\n',
            'Non-label code inside switch statements should be indented.'
            ' [whitespace/indent] [4]')
        self.assert_multi_line_lint(
            '    switch (condition) {\n'
            '    case fooCondition:\n'
            '    case barCondition:\n'
            '        switch (otherCondition) {\n'
            '        default:\n'
            '        return;\n'
            '        }\n'
            '    default:\n'
            '        i--;\n'
            '    }\n',
            'Non-label code inside switch statements should be indented.'
            ' [whitespace/indent] [4]')
        # 6. Boolean expressions at the same nesting level that span
        # multiple lines should have their operators on the left side of
        # the line instead of the right side.
        self.assert_multi_line_lint(
            '    return attr->name() == srcAttr\n'
            '        || attr->name() == lowsrcAttr;\n',
            '')
        self.assert_multi_line_lint(
            '    return attr->name() == srcAttr ||\n'
            '        attr->name() == lowsrcAttr;\n',
            'Boolean expressions that span multiple lines should have their '
            'operators on the left side of the line instead of the right side.'
            ' [whitespace/operators] [4]')
    def test_spacing(self):
        """Spacing rules: no spaces around unary operators, spaces around
        binary/ternary operators, a space between a control keyword and
        its parenthesis, and none between a function name and its
        parenthesis or just inside parentheses."""
        # 1. Do not place spaces around unary operators.
        self.assert_multi_line_lint(
            'i++;',
            '')
        self.assert_multi_line_lint(
            'i ++;',
            'Extra space for operator ++; [whitespace/operators] [4]')
        # 2. Do place spaces around binary and ternary operators.
        self.assert_multi_line_lint(
            'y = m * x + b;',
            '')
        self.assert_multi_line_lint(
            'f(a, b);',
            '')
        self.assert_multi_line_lint(
            'c = a | b;',
            '')
        self.assert_multi_line_lint(
            'return condition ? 1 : 0;',
            '')
        self.assert_multi_line_lint(
            'y=m*x+b;',
            'Missing spaces around = [whitespace/operators] [4]')
        self.assert_multi_line_lint(
            'f(a,b);',
            'Missing space after , [whitespace/comma] [3]')
        self.assert_multi_line_lint(
            'c = a|b;',
            'Missing spaces around | [whitespace/operators] [3]')
        # FIXME: We cannot catch this lint error.
        # self.assert_multi_line_lint(
        #     'return condition ? 1:0;',
        #     '')
        # 3. Place spaces between control statements and their parentheses.
        self.assert_multi_line_lint(
            '    if (condition)\n'
            '        doIt();\n',
            '')
        self.assert_multi_line_lint(
            '    if(condition)\n'
            '        doIt();\n',
            'Missing space before ( in if( [whitespace/parens] [5]')
        # 4. Do not place spaces between a function and its parentheses,
        # or between a parenthesis and its content.
        self.assert_multi_line_lint(
            'f(a, b);',
            '')
        self.assert_multi_line_lint(
            'f (a, b);',
            'Extra space before ( in function call [whitespace/parens] [4]')
        # Two distinct errors expected, hence the list.
        self.assert_multi_line_lint(
            'f( a, b );',
            ['Extra space after ( in function call [whitespace/parens] [4]',
             'Extra space before ) [whitespace/parens] [2]'])
    def test_line_breaking(self):
        """Line-breaking rules: one statement per line, else placement
        relative to a closing brace, and rewriting 'else if' / 'else'
        after a prior branch that returns/breaks/continues/gotos."""
        # 1. Each statement should get its own line.
        self.assert_multi_line_lint(
            '    x++;\n'
            '    y++;\n'
            '    if (condition);\n'
            '        doIt();\n',
            '')
        self.assert_multi_line_lint(
            '    if (condition) \\\n'
            '        doIt();\n',
            '')
        self.assert_multi_line_lint(
            '    x++; y++;',
            'More than one command on the same line [whitespace/newline] [4]')
        self.assert_multi_line_lint(
            '    if (condition) doIt();\n',
            'More than one command on the same line in if [whitespace/parens] [4]')
        # Ensure that having a # in the line doesn't hide the error.
        self.assert_multi_line_lint(
            '    x++; char a[] = "#";',
            'More than one command on the same line [whitespace/newline] [4]')
        # Ignore preprocessor if's.
        self.assert_multi_line_lint(
            '#if (condition) || (condition2)\n',
            '')
        # 2. An else statement should go on the same line as a preceding
        # close brace if one is present, else it should line up with the
        # if statement.
        self.assert_multi_line_lint(
            'if (condition) {\n'
            '    doSomething();\n'
            '    doSomethingAgain();\n'
            '} else {\n'
            '    doSomethingElse();\n'
            '    doSomethingElseAgain();\n'
            '}\n',
            '')
        self.assert_multi_line_lint(
            'if (condition)\n'
            '    doSomething();\n'
            'else\n'
            '    doSomethingElse();\n',
            '')
        self.assert_multi_line_lint(
            'if (condition) {\n'
            '    doSomething();\n'
            '} else {\n'
            '    doSomethingElse();\n'
            '    doSomethingElseAgain();\n'
            '}\n',
            '')
        # Single-line do { ... } while (0) macros are accepted.
        self.assert_multi_line_lint(
            '#define TEST_ASSERT(expression) do { if (!(expression)) { TestsController::shared().testFailed(__FILE__, __LINE__, #expression); return; } } while (0)\n',
            '')
        self.assert_multi_line_lint(
            '#define TEST_ASSERT(expression) do { if ( !(expression)) { TestsController::shared().testFailed(__FILE__, __LINE__, #expression); return; } } while (0)\n',
            'Extra space after ( in if [whitespace/parens] [5]')
        # FIXME: currently we only check first conditional, so we cannot detect errors in next ones.
        # self.assert_multi_line_lint(
        #     '#define TEST_ASSERT(expression) do { if (!(expression)) { TestsController::shared().testFailed(__FILE__, __LINE__, #expression); return; } } while (0 )\n',
        #     'Mismatching spaces inside () in if [whitespace/parens] [5]')
        self.assert_multi_line_lint(
            'WTF_MAKE_NONCOPYABLE(ClassName); WTF_MAKE_FAST_ALLOCATED;\n',
            '')
        self.assert_multi_line_lint(
            'if (condition) {\n'
            '    doSomething();\n'
            '    doSomethingAgain();\n'
            '}\n'
            'else {\n'
            '    doSomethingElse();\n'
            '    doSomethingElseAgain();\n'
            '}\n',
            'An else should appear on the same line as the preceding } [whitespace/newline] [4]')
        # Several distinct errors expected at once, hence the lists below.
        self.assert_multi_line_lint(
            'if (condition) doSomething(); else doSomethingElse();\n',
            ['More than one command on the same line [whitespace/newline] [4]',
             'Else clause should never be on same line as else (use 2 lines) [whitespace/newline] [4]',
             'More than one command on the same line in if [whitespace/parens] [4]'])
        self.assert_multi_line_lint(
            'if (condition) doSomething(); else {\n'
            '    doSomethingElse();\n'
            '}\n',
            ['More than one command on the same line in if [whitespace/parens] [4]',
             'If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]'])
        self.assert_multi_line_lint(
            'void func()\n'
            '{\n'
            '    while (condition) { }\n'
            '    return 0;\n'
            '}\n',
            '')
        self.assert_multi_line_lint(
            'void func()\n'
            '{\n'
            '    for (i = 0; i < 42; i++) { foobar(); }\n'
            '    return 0;\n'
            '}\n',
            'More than one command on the same line in for [whitespace/parens] [4]')
        # 3. An else if statement should be written as an if statement
        # when the prior if concludes with a return statement.
        self.assert_multi_line_lint(
            'if (motivated) {\n'
            '    if (liquid)\n'
            '        return money;\n'
            '} else if (tired) {\n'
            '    break;\n'
            '}',
            '')
        self.assert_multi_line_lint(
            'if (condition)\n'
            '    doSomething();\n'
            'else if (otherCondition)\n'
            '    doSomethingElse();\n',
            '')
        self.assert_multi_line_lint(
            'if (condition)\n'
            '    doSomething();\n'
            'else\n'
            '    doSomethingElse();\n',
            '')
        self.assert_multi_line_lint(
            'if (condition)\n'
            '    returnValue = foo;\n'
            'else if (otherCondition)\n'
            '    returnValue = bar;\n',
            '')
        self.assert_multi_line_lint(
            'if (condition)\n'
            '    returnValue = foo;\n'
            'else\n'
            '    returnValue = bar;\n',
            '')
        self.assert_multi_line_lint(
            'if (condition)\n'
            '    doSomething();\n'
            'else if (liquid)\n'
            '    return money;\n'
            'else if (broke)\n'
            '    return favor;\n'
            'else\n'
            '    sleep(28800);\n',
            '')
        self.assert_multi_line_lint(
            'if (liquid) {\n'
            '    prepare();\n'
            '    return money;\n'
            '} else if (greedy) {\n'
            '    keep();\n'
            '    return nothing;\n'
            '}\n',
            'An else if statement should be written as an if statement when the '
            'prior "if" concludes with a return, break, continue or goto statement.'
            ' [readability/control_flow] [4]')
        self.assert_multi_line_lint(
            '    if (stupid) {\n'
            'infiniteLoop:\n'
            '        goto infiniteLoop;\n'
            '    } else if (evil)\n'
            '        goto hell;\n',
            ['If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]',
             'An else if statement should be written as an if statement when the '
             'prior "if" concludes with a return, break, continue or goto statement.'
             ' [readability/control_flow] [4]'])
        self.assert_multi_line_lint(
            'if (liquid)\n'
            '{\n'
            '    prepare();\n'
            '    return money;\n'
            '}\n'
            'else if (greedy)\n'
            '    keep();\n',
            ['If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]',
             'This { should be at the end of the previous line [whitespace/braces] [4]',
             'An else should appear on the same line as the preceding } [whitespace/newline] [4]',
             'An else if statement should be written as an if statement when the '
             'prior "if" concludes with a return, break, continue or goto statement.'
             ' [readability/control_flow] [4]'])
        self.assert_multi_line_lint(
            'if (gone)\n'
            '    return;\n'
            'else if (here)\n'
            '    go();\n',
            'An else if statement should be written as an if statement when the '
            'prior "if" concludes with a return, break, continue or goto statement.'
            ' [readability/control_flow] [4]')
        self.assert_multi_line_lint(
            'if (gone)\n'
            '    return;\n'
            'else\n'
            '    go();\n',
            'An else statement can be removed when the prior "if" concludes '
            'with a return, break, continue or goto statement.'
            ' [readability/control_flow] [4]')
        self.assert_multi_line_lint(
            'if (motivated) {\n'
            '    prepare();\n'
            '    continue;\n'
            '} else {\n'
            '    cleanUp();\n'
            '    break;\n'
            '}\n',
            'An else statement can be removed when the prior "if" concludes '
            'with a return, break, continue or goto statement.'
            ' [readability/control_flow] [4]')
        self.assert_multi_line_lint(
            'if (tired)\n'
            '    break;\n'
            'else {\n'
            '    prepare();\n'
            '    continue;\n'
            '}\n',
            ['If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]',
             'An else statement can be removed when the prior "if" concludes '
             'with a return, break, continue or goto statement.'
             ' [readability/control_flow] [4]'])
    def test_braces(self):
        """Brace-placement rules: braces on their own line for function
        definitions, open brace on the preceding line for other blocks,
        braces required for multi-line single-statement bodies, matching
        braces across if/else arms, and empty braces for empty bodies."""
        # 1. Function definitions: place each brace on its own line.
        self.assert_multi_line_lint(
            'int main()\n'
            '{\n'
            '    doSomething();\n'
            '}\n',
            '')
        self.assert_multi_line_lint(
            'int main() {\n'
            '    doSomething();\n'
            '}\n',
            'Place brace on its own line for function definitions. [whitespace/braces] [4]')
        # 2. Other braces: place the open brace on the line preceding the
        # code block; place the close brace on its own line.
        self.assert_multi_line_lint(
            'class MyClass {\n'
            '    int foo;\n'
            '};\n',
            '')
        self.assert_multi_line_lint(
            'namespace WebCore {\n'
            'int foo;\n'
            '};\n',
            '')
        self.assert_multi_line_lint(
            'for (int i = 0; i < 10; i++) {\n'
            '    DoSomething();\n'
            '};\n',
            '')
        self.assert_multi_line_lint(
            'class MyClass\n'
            '{\n'
            '    int foo;\n'
            '};\n',
            'This { should be at the end of the previous line [whitespace/braces] [4]')
        self.assert_multi_line_lint(
            'if (condition)\n'
            '{\n'
            '    int foo;\n'
            '}\n',
            'This { should be at the end of the previous line [whitespace/braces] [4]')
        self.assert_multi_line_lint(
            'for (int i = 0; i < 10; i++)\n'
            '{\n'
            '    int foo;\n'
            '}\n',
            'This { should be at the end of the previous line [whitespace/braces] [4]')
        self.assert_multi_line_lint(
            'while (true)\n'
            '{\n'
            '    int foo;\n'
            '}\n',
            'This { should be at the end of the previous line [whitespace/braces] [4]')
        self.assert_multi_line_lint(
            'foreach (Foo* foo, foos)\n'
            '{\n'
            '    int bar;\n'
            '}\n',
            'This { should be at the end of the previous line [whitespace/braces] [4]')
        self.assert_multi_line_lint(
            'switch (type)\n'
            '{\n'
            'case foo: return;\n'
            '}\n',
            'This { should be at the end of the previous line [whitespace/braces] [4]')
        # NOTE(review): the next four assertions repeat the if/for/while/
        # switch cases above verbatim — this looks like an accidental
        # duplication worth removing in a follow-up.
        self.assert_multi_line_lint(
            'if (condition)\n'
            '{\n'
            '    int foo;\n'
            '}\n',
            'This { should be at the end of the previous line [whitespace/braces] [4]')
        self.assert_multi_line_lint(
            'for (int i = 0; i < 10; i++)\n'
            '{\n'
            '    int foo;\n'
            '}\n',
            'This { should be at the end of the previous line [whitespace/braces] [4]')
        self.assert_multi_line_lint(
            'while (true)\n'
            '{\n'
            '    int foo;\n'
            '}\n',
            'This { should be at the end of the previous line [whitespace/braces] [4]')
        self.assert_multi_line_lint(
            'switch (type)\n'
            '{\n'
            'case foo: return;\n'
            '}\n',
            'This { should be at the end of the previous line [whitespace/braces] [4]')
        self.assert_multi_line_lint(
            'else if (type)\n'
            '{\n'
            'case foo: return;\n'
            '}\n',
            'This { should be at the end of the previous line [whitespace/braces] [4]')
        # 3. Curly braces are not required for single-line conditionals and
        # loop bodies, but are required for single-statement bodies that
        # span multiple lines.
        #
        # Positive tests
        #
        self.assert_multi_line_lint(
            'if (condition1)\n'
            '    statement1();\n'
            'else\n'
            '    statement2();\n',
            '')
        self.assert_multi_line_lint(
            'if (condition1)\n'
            '    statement1();\n'
            'else if (condition2)\n'
            '    statement2();\n',
            '')
        self.assert_multi_line_lint(
            'if (condition1)\n'
            '    statement1();\n'
            'else if (condition2)\n'
            '    statement2();\n'
            'else\n'
            '    statement3();\n',
            '')
        self.assert_multi_line_lint(
            'for (; foo; bar)\n'
            '    int foo;\n',
            '')
        self.assert_multi_line_lint(
            'for (; foo; bar) {\n'
            '    int foo;\n'
            '}\n',
            '')
        self.assert_multi_line_lint(
            'foreach (foo, foos) {\n'
            '    int bar;\n'
            '}\n',
            '')
        self.assert_multi_line_lint(
            'foreach (foo, foos)\n'
            '    int bar;\n',
            '')
        self.assert_multi_line_lint(
            'while (true) {\n'
            '    int foo;\n'
            '}\n',
            '')
        self.assert_multi_line_lint(
            'while (true)\n'
            '    int foo;\n',
            '')
        self.assert_multi_line_lint(
            'if (condition1) {\n'
            '    statement1();\n'
            '} else {\n'
            '    statement2();\n'
            '}\n',
            '')
        self.assert_multi_line_lint(
            'if (condition1) {\n'
            '    statement1();\n'
            '} else if (condition2) {\n'
            '    statement2();\n'
            '}\n',
            '')
        self.assert_multi_line_lint(
            'if (condition1) {\n'
            '    statement1();\n'
            '} else if (condition2) {\n'
            '    statement2();\n'
            '} else {\n'
            '    statement3();\n'
            '}\n',
            '')
        self.assert_multi_line_lint(
            'if (condition1) {\n'
            '    statement1();\n'
            '    statement1_2();\n'
            '} else if (condition2) {\n'
            '    statement2();\n'
            '    statement2_2();\n'
            '}\n',
            '')
        self.assert_multi_line_lint(
            'if (condition1) {\n'
            '    statement1();\n'
            '    statement1_2();\n'
            '} else if (condition2) {\n'
            '    statement2();\n'
            '    statement2_2();\n'
            '} else {\n'
            '    statement3();\n'
            '    statement3_2();\n'
            '}\n',
            '')
        #
        # Negative tests
        #
        self.assert_multi_line_lint(
            'if (condition)\n'
            '    doSomething(\n'
            '        spanningMultipleLines);\n',
            'A conditional or loop body must use braces if the statement is more than one line long. [whitespace/braces] [4]')
        self.assert_multi_line_lint(
            'if (condition)\n'
            '    // Single-line comment\n'
            '    doSomething();\n',
            'A conditional or loop body must use braces if the statement is more than one line long. [whitespace/braces] [4]')
        self.assert_multi_line_lint(
            'if (condition1)\n'
            '    statement1();\n'
            'else if (condition2)\n'
            '    // Single-line comment\n'
            '    statement2();\n',
            'A conditional or loop body must use braces if the statement is more than one line long. [whitespace/braces] [4]')
        self.assert_multi_line_lint(
            'if (condition1)\n'
            '    statement1();\n'
            'else if (condition2)\n'
            '    statement2();\n'
            'else\n'
            '    // Single-line comment\n'
            '    statement3();\n',
            'A conditional or loop body must use braces if the statement is more than one line long. [whitespace/braces] [4]')
        self.assert_multi_line_lint(
            'for (; foo; bar)\n'
            '    // Single-line comment\n'
            '    int foo;\n',
            'A conditional or loop body must use braces if the statement is more than one line long. [whitespace/braces] [4]')
        self.assert_multi_line_lint(
            'foreach (foo, foos)\n'
            '    // Single-line comment\n'
            '    int bar;\n',
            'A conditional or loop body must use braces if the statement is more than one line long. [whitespace/braces] [4]')
        self.assert_multi_line_lint(
            'while (true)\n'
            '    // Single-line comment\n'
            '    int foo;\n'
            '\n',
            'A conditional or loop body must use braces if the statement is more than one line long. [whitespace/braces] [4]')
        # 4. If one part of an if-else statement uses curly braces, the
        # other part must too.
        self.assert_multi_line_lint(
            'if (condition1) {\n'
            '    doSomething1();\n'
            '    doSomething1_2();\n'
            '} else if (condition2)\n'
            '    doSomething2();\n'
            'else\n'
            '    doSomething3();\n',
            'If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]')
        self.assert_multi_line_lint(
            'if (condition1)\n'
            '    doSomething1();\n'
            'else if (condition2) {\n'
            '    doSomething2();\n'
            '    doSomething2_2();\n'
            '} else\n'
            '    doSomething3();\n',
            'If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]')
        self.assert_multi_line_lint(
            'if (condition1) {\n'
            '    doSomething1();\n'
            '} else if (condition2) {\n'
            '    doSomething2();\n'
            '    doSomething2_2();\n'
            '} else\n'
            '    doSomething3();\n',
            'If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]')
        self.assert_multi_line_lint(
            'if (condition1)\n'
            '    doSomething1();\n'
            'else if (condition2)\n'
            '    doSomething2();\n'
            'else {\n'
            '    doSomething3();\n'
            '    doSomething3_2();\n'
            '}\n',
            'If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]')
        self.assert_multi_line_lint(
            'if (condition1) {\n'
            '    doSomething1();\n'
            '    doSomething1_2();\n'
            '} else if (condition2)\n'
            '    doSomething2();\n'
            'else {\n'
            '    doSomething3();\n'
            '    doSomething3_2();\n'
            '}\n',
            'If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]')
        self.assert_multi_line_lint(
            'if (condition1)\n'
            '    doSomething1();\n'
            'else if (condition2) {\n'
            '    doSomething2();\n'
            '    doSomething2_2();\n'
            '} else {\n'
            '    doSomething3();\n'
            '    doSomething3_2();\n'
            '}\n',
            'If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]')
        # 5. Control clauses without a body should use empty braces.
        self.assert_multi_line_lint(
            'for ( ; current; current = current->next) { }\n',
            '')
        self.assert_multi_line_lint(
            'for ( ; current;\n'
            '     current = current->next) { }\n',
            'Weird number of spaces at line-start. Are you using a 4-space indent? [whitespace/indent] [3]')
        self.assert_multi_line_lint(
            'for ( ; current; current = current->next);\n',
            'Semicolon defining empty statement for this loop. Use { } instead. [whitespace/semicolon] [5]')
        self.assert_multi_line_lint(
            'while (true);\n',
            'Semicolon defining empty statement for this loop. Use { } instead. [whitespace/semicolon] [5]')
        # A do-while's trailing semicolon is legitimate.
        self.assert_multi_line_lint(
            '} while (true);\n',
            '')
    def test_null_false_zero(self):
        """NULL/true/false rules: NULL is flagged in C++ (but not C or
        Objective-C) except in glib/gstreamer/gtk varargs calls, and
        equality comparisons against true/false/NULL/nullptr are
        flagged."""
        # 1. In C++, the null pointer value should be written as 0. In C,
        # it should be written as NULL. In Objective-C and Objective-C++,
        # follow the guideline for C or C++, respectively, but use nil to
        # represent a null Objective-C object.
        self.assert_lint(
            'functionCall(NULL)',
            'Use 0 instead of NULL.'
            ' [readability/null] [5]',
            'foo.cpp')
        self.assert_lint(
            "// Don't use NULL in comments since it isn't in code.",
            'Use 0 or null instead of NULL (even in *comments*).'
            ' [readability/null] [4]',
            'foo.cpp')
        self.assert_lint(
            '"A string with NULL" // and a comment with NULL is tricky to flag correctly in cpp_style.',
            'Use 0 or null instead of NULL (even in *comments*).'
            ' [readability/null] [4]',
            'foo.cpp')
        # NULL inside a string literal or as part of a longer identifier
        # is not flagged.
        self.assert_lint(
            '"A string containing NULL is ok"',
            '',
            'foo.cpp')
        self.assert_lint(
            'if (aboutNULL)',
            '',
            'foo.cpp')
        self.assert_lint(
            'myVariable = NULLify',
            '',
            'foo.cpp')
        # Make sure that the NULL check does not apply to C and Objective-C files.
        self.assert_lint(
            'functionCall(NULL)',
            '',
            'foo.c')
        self.assert_lint(
            'functionCall(NULL)',
            '',
            'foo.m')
        # Make sure that the NULL check does not apply to g_object_{set,get} and
        # g_str{join,concat} — these glib/gstreamer/gtk varargs APIs require a
        # NULL sentinel terminator.
        self.assert_lint(
            'g_object_get(foo, "prop", &bar, NULL);',
            '')
        self.assert_lint(
            'g_object_set(foo, "prop", bar, NULL);',
            '')
        self.assert_lint(
            'g_build_filename(foo, bar, NULL);',
            '')
        self.assert_lint(
            'gst_bin_add_many(foo, bar, boo, NULL);',
            '')
        self.assert_lint(
            'gst_bin_remove_many(foo, bar, boo, NULL);',
            '')
        self.assert_lint(
            'gst_element_link_many(foo, bar, boo, NULL);',
            '')
        self.assert_lint(
            'gst_element_unlink_many(foo, bar, boo, NULL);',
            '')
        self.assert_lint(
            'gst_structure_get(foo, "value", G_TYPE_INT, &value, NULL);',
            '')
        self.assert_lint(
            'gst_structure_set(foo, "value", G_TYPE_INT, value, NULL);',
            '')
        self.assert_lint(
            'gst_structure_remove_fields(foo, "value", "bar", NULL);',
            '')
        self.assert_lint(
            'gst_structure_new("foo", "value", G_TYPE_INT, value, NULL);',
            '')
        self.assert_lint(
            'gst_structure_id_new(FOO, VALUE, G_TYPE_INT, value, NULL);',
            '')
        self.assert_lint(
            'gst_structure_id_set(FOO, VALUE, G_TYPE_INT, value, NULL);',
            '')
        self.assert_lint(
            'gst_structure_id_get(FOO, VALUE, G_TYPE_INT, &value, NULL);',
            '')
        self.assert_lint(
            'gst_caps_new_simple(mime, "value", G_TYPE_INT, &value, NULL);',
            '')
        self.assert_lint(
            'gst_caps_new_full(structure1, structure2, NULL);',
            '')
        self.assert_lint(
            'gchar* result = g_strconcat("part1", "part2", "part3", NULL);',
            '')
        self.assert_lint(
            'gchar* result = g_strconcat("part1", NULL);',
            '')
        self.assert_lint(
            'gchar* result = g_strjoin(",", "part1", "part2", "part3", NULL);',
            '')
        self.assert_lint(
            'gchar* result = g_strjoin(",", "part1", NULL);',
            '')
        self.assert_lint(
            'gchar* result = gdk_pixbuf_save_to_callback(pixbuf, function, data, type, error, NULL);',
            '')
        self.assert_lint(
            'gchar* result = gdk_pixbuf_save_to_buffer(pixbuf, function, data, type, error, NULL);',
            '')
        self.assert_lint(
            'gchar* result = gdk_pixbuf_save_to_stream(pixbuf, function, data, type, error, NULL);',
            '')
        self.assert_lint(
            'gtk_widget_style_get(style, "propertyName", &value, "otherName", &otherValue, NULL);',
            '')
        self.assert_lint(
            'gtk_style_context_get_style(context, "propertyName", &value, "otherName", &otherValue, NULL);',
            '')
        self.assert_lint(
            'gtk_style_context_get(context, static_cast<GtkStateFlags>(0), "property", &value, NULL);',
            '')
        # These two gtk functions are NOT on the allow list, so NULL is
        # still flagged.
        self.assert_lint(
            'gtk_widget_style_get_property(style, NULL, NULL);',
            'Use 0 instead of NULL. [readability/null] [5]',
            'foo.cpp')
        self.assert_lint(
            'gtk_widget_style_get_valist(style, NULL, NULL);',
            'Use 0 instead of NULL. [readability/null] [5]',
            'foo.cpp')
        # 2. C++ and C bool values should be written as true and
        # false. Objective-C BOOL values should be written as YES and NO.
        # FIXME: Implement this.
        # 3. Tests for true/false and null/non-null should be done without
        # equality comparisons.
        self.assert_lint_one_of_many_errors_re(
            'if (string != NULL)',
            r'Tests for true/false and null/non-null should be done without equality comparisons\.')
        self.assert_lint(
            'if (p == nullptr)',
            'Tests for true/false and null/non-null should be done without equality comparisons.'
            ' [readability/comparison_to_boolean] [5]')
        self.assert_lint(
            'if (condition == true)',
            'Tests for true/false and null/non-null should be done without equality comparisons.'
            ' [readability/comparison_to_boolean] [5]')
        self.assert_lint(
            'if (myVariable != /* Why would anyone put a comment here? */ false)',
            'Tests for true/false and null/non-null should be done without equality comparisons.'
            ' [readability/comparison_to_boolean] [5]')
        self.assert_lint_one_of_many_errors_re(
            'if (NULL == thisMayBeNull)',
            r'Tests for true/false and null/non-null should be done without equality comparisons\.')
        self.assert_lint(
            'if (nullptr /* funny place for a comment */ == p)',
            'Tests for true/false and null/non-null should be done without equality comparisons.'
            ' [readability/comparison_to_boolean] [5]')
        self.assert_lint(
            'if (true != anotherCondition)',
            'Tests for true/false and null/non-null should be done without equality comparisons.'
            ' [readability/comparison_to_boolean] [5]')
        self.assert_lint(
            'if (false == myBoolValue)',
            'Tests for true/false and null/non-null should be done without equality comparisons.'
            ' [readability/comparison_to_boolean] [5]')
        # Identifiers that merely contain 'true' are not comparisons
        # against the boolean literal.
        self.assert_lint(
            'if (fontType == trueType)',
            '')
        self.assert_lint(
            'if (othertrue == fontType)',
            '')
        self.assert_lint(
            'if (LIKELY(foo == 0))',
            '')
        self.assert_lint(
            'if (UNLIKELY(foo == 0))',
            '')
        self.assert_lint(
            'if ((a - b) == 0.5)',
            '')
        self.assert_lint(
            'if (0.5 == (a - b))',
            '')
        # NULL inside LIKELY/UNLIKELY is still flagged by the null check.
        self.assert_lint(
            'if (LIKELY(foo == NULL))',
            'Use 0 instead of NULL. [readability/null] [5]')
        self.assert_lint(
            'if (UNLIKELY(foo == NULL))',
            'Use 0 instead of NULL. [readability/null] [5]')
def test_directive_indentation(self):
self.assert_lint(
" #if FOO",
"preprocessor directives (e.g., #ifdef, #define, #import) should never be indented."
" [whitespace/indent] [4]",
"foo.cpp")
def test_using_std(self):
self.assert_lint(
'using std::min;',
"Use 'using namespace std;' instead of 'using std::min;'."
" [build/using_std] [4]",
'foo.cpp')
def test_using_std_swap_ignored(self):
self.assert_lint(
'using std::swap;',
'',
'foo.cpp')
def test_max_macro(self):
self.assert_lint(
'int i = MAX(0, 1);',
'',
'foo.c')
self.assert_lint(
'int i = MAX(0, 1);',
'Use std::max() or std::max<type>() instead of the MAX() macro.'
' [runtime/max_min_macros] [4]',
'foo.cpp')
self.assert_lint(
'inline int foo() { return MAX(0, 1); }',
'Use std::max() or std::max<type>() instead of the MAX() macro.'
' [runtime/max_min_macros] [4]',
'foo.h')
def test_min_macro(self):
    """The MIN() macro is tolerated in C but flagged in C++ sources and headers."""
    cpp_error = ('Use std::min() or std::min<type>() instead of the MIN() macro.'
                 ' [runtime/max_min_macros] [4]')
    # Plain C files may keep using the macro.
    self.assert_lint('int i = MIN(0, 1);', '', 'foo.c')
    # C++ implementation files and headers should use std::min instead.
    self.assert_lint('int i = MIN(0, 1);', cpp_error, 'foo.cpp')
    self.assert_lint('inline int foo() { return MIN(0, 1); }', cpp_error, 'foo.h')
# NOTE(review): the method name ('fucntion') is misspelled, and 'equivelent'
# below is misspelled too — but the latter must match the checker's actual
# emitted message verbatim, so neither is changed here.
def test_ctype_fucntion(self):
    """<ctype.h> helpers such as isascii() should be flagged in favor of
    the equivalents in <wtf/ASCIICType.h>."""
    self.assert_lint(
        'int i = isascii(8);',
        'Use equivelent function in <wtf/ASCIICType.h> instead of the '
        'isascii() function. [runtime/ctype_function] [4]',
        'foo.cpp')
def test_names(self):
    """Exercise the identifier-naming checks: underscores are banned in
    WebKit identifiers (with platform/library exceptions), and the single
    letter 'l' is rejected because it is easily confused with '1'."""
    name_underscore_error_message = " is incorrectly named. Don't use underscores in your identifier names. [readability/naming/underscores] [4]"
    name_tooshort_error_message = " is incorrectly named. Don't use the single letter 'l' as an identifier name. [readability/naming] [4]"

    # Basic cases from WebKit style guide.
    self.assert_lint('struct Data;', '')
    self.assert_lint('size_t bufferSize;', '')
    self.assert_lint('class HTMLDocument;', '')
    self.assert_lint('String mimeType();', '')
    self.assert_lint('size_t buffer_size;',
                     'buffer_size' + name_underscore_error_message)
    self.assert_lint('short m_length;', '')
    self.assert_lint('short _length;',
                     '_length' + name_underscore_error_message)
    self.assert_lint('short length_;',
                     'length_' + name_underscore_error_message)
    self.assert_lint('unsigned _length;',
                     '_length' + name_underscore_error_message)
    self.assert_lint('unsigned long _length;',
                     '_length' + name_underscore_error_message)
    self.assert_lint('unsigned long long _length;',
                     '_length' + name_underscore_error_message)

    # Allow underscores in Objective C files (detected by extension or by
    # Objective-C constructs such as #import/@interface in a header).
    self.assert_lint('unsigned long long _length;',
                     '',
                     'foo.m')
    self.assert_lint('unsigned long long _length;',
                     '',
                     'foo.mm')
    self.assert_lint('#import "header_file.h"\n'
                     'unsigned long long _length;',
                     '',
                     'foo.h')
    self.assert_lint('unsigned long long _length;\n'
                     '@interface WebFullscreenWindow;',
                     '',
                     'foo.h')
    self.assert_lint('unsigned long long _length;\n'
                     '@implementation WebFullscreenWindow;',
                     '',
                     'foo.h')
    self.assert_lint('unsigned long long _length;\n'
                     '@class WebWindowFadeAnimation;',
                     '',
                     'foo.h')

    # Variable name 'l' is easy to confuse with '1'.
    self.assert_lint('int l;', 'l' + name_tooshort_error_message)
    self.assert_lint('size_t l;', 'l' + name_tooshort_error_message)
    self.assert_lint('long long l;', 'l' + name_tooshort_error_message)

    # Pointers, references, functions, templates, and adjectives.
    self.assert_lint('char* under_score;',
                     'under_score' + name_underscore_error_message)
    self.assert_lint('const int UNDER_SCORE;',
                     'UNDER_SCORE' + name_underscore_error_message)
    self.assert_lint('static inline const char const& const under_score;',
                     'under_score' + name_underscore_error_message)
    self.assert_lint('WebCore::LayoutObject* under_score;',
                     'under_score' + name_underscore_error_message)
    self.assert_lint('int func_name();',
                     'func_name' + name_underscore_error_message)
    self.assert_lint('RefPtr<LayoutObject*> under_score;',
                     'under_score' + name_underscore_error_message)
    self.assert_lint('WTF::Vector<WTF::RefPtr<const LayoutObject* const>> under_score;',
                     'under_score' + name_underscore_error_message)
    self.assert_lint('int under_score[];',
                     'under_score' + name_underscore_error_message)
    self.assert_lint('struct dirent* under_score;',
                     'under_score' + name_underscore_error_message)
    self.assert_lint('long under_score;',
                     'under_score' + name_underscore_error_message)
    self.assert_lint('long long under_score;',
                     'under_score' + name_underscore_error_message)
    self.assert_lint('long double under_score;',
                     'under_score' + name_underscore_error_message)
    self.assert_lint('long long int under_score;',
                     'under_score' + name_underscore_error_message)

    # Declarations in control statement.
    self.assert_lint('if (int under_score = 42) {',
                     'under_score' + name_underscore_error_message)
    self.assert_lint('else if (int under_score = 42) {',
                     'under_score' + name_underscore_error_message)
    self.assert_lint('for (int under_score = 42; cond; i++) {',
                     'under_score' + name_underscore_error_message)
    self.assert_lint('while (foo & under_score = bar) {',
                     'under_score' + name_underscore_error_message)
    self.assert_lint('for (foo * under_score = p; cond; i++) {',
                     'under_score' + name_underscore_error_message)
    self.assert_lint('for (foo * under_score; cond; i++) {',
                     'under_score' + name_underscore_error_message)
    # Uses of (not declarations of) underscored names are not flagged.
    self.assert_lint('while (foo & value_in_thirdparty_library) {', '')
    self.assert_lint('while (foo * value_in_thirdparty_library) {', '')
    self.assert_lint('if (mli && S_OK == mli->foo()) {', '')

    # More member variables and functions.
    self.assert_lint('int SomeClass::s_validName', '')
    self.assert_lint('int m_under_score;',
                     'm_under_score' + name_underscore_error_message)
    self.assert_lint('int SomeClass::s_under_score = 0;',
                     'SomeClass::s_under_score' + name_underscore_error_message)
    self.assert_lint('int SomeClass::under_score = 0;',
                     'SomeClass::under_score' + name_underscore_error_message)

    # Other statements.
    self.assert_lint('return INT_MAX;', '')
    self.assert_lint('return_t under_score;',
                     'under_score' + name_underscore_error_message)
    self.assert_lint('goto under_score;',
                     'under_score' + name_underscore_error_message)
    self.assert_lint('delete static_cast<Foo*>(p);', '')

    # Multiple variables in one line.
    self.assert_lint('void myFunction(int variable1, int another_variable);',
                     'another_variable' + name_underscore_error_message)
    self.assert_lint('int variable1, another_variable;',
                     'another_variable' + name_underscore_error_message)
    self.assert_lint('int first_variable, secondVariable;',
                     'first_variable' + name_underscore_error_message)
    self.assert_lint('void my_function(int variable_1, int variable_2);',
                     ['my_function' + name_underscore_error_message,
                      'variable_1' + name_underscore_error_message,
                      'variable_2' + name_underscore_error_message])
    self.assert_lint('for (int variable_1, variable_2;;) {',
                     ['variable_1' + name_underscore_error_message,
                      'variable_2' + name_underscore_error_message])

    # There is an exception for op code functions but only in the JavaScriptCore directory.
    self.assert_lint('void this_op_code(int var1, int var2)', '', 'Source/JavaScriptCore/foo.cpp')
    self.assert_lint('void op_code(int var1, int var2)', '', 'Source/JavaScriptCore/foo.cpp')
    self.assert_lint('void this_op_code(int var1, int var2)', 'this_op_code' + name_underscore_error_message)

    # GObject requires certain magical names in class declarations.
    self.assert_lint('void webkit_dom_object_init();', '')
    self.assert_lint('void webkit_dom_object_class_init();', '')

    # There is an exception for GTK+ API.
    self.assert_lint('void webkit_web_view_load(int var1, int var2)', '', 'Source/Webkit/gtk/webkit/foo.cpp')
    self.assert_lint('void webkit_web_view_load(int var1, int var2)', '', 'Source/Webkit2/UIProcess/gtk/foo.cpp')

    # Test that this doesn't also apply to files not in a 'gtk' directory.
    self.assert_lint('void webkit_web_view_load(int var1, int var2)',
                     'webkit_web_view_load is incorrectly named. Don\'t use underscores in your identifier names.'
                     ' [readability/naming/underscores] [4]', 'Source/Webkit/webkit/foo.cpp')
    # Test that this doesn't also apply to names that don't start with 'webkit_'.
    self.assert_lint_one_of_many_errors_re('void otherkit_web_view_load(int var1, int var2)',
                                           'otherkit_web_view_load is incorrectly named. Don\'t use underscores in your identifier names.'
                                           ' [readability/naming/underscores] [4]', 'Source/Webkit/webkit/foo.cpp')

    # There is an exception for some unit tests that begin with "tst_".
    self.assert_lint('void tst_QWebFrame::arrayObjectEnumerable(int var1, int var2)', '')

    # The Qt API uses names that begin with "qt_" or "_q_".
    self.assert_lint('void QTFrame::qt_drt_is_awesome(int var1, int var2)', '')
    self.assert_lint('void QTFrame::_q_drt_is_awesome(int var1, int var2)', '')
    self.assert_lint('void qt_drt_is_awesome(int var1, int var2);', '')
    self.assert_lint('void _q_drt_is_awesome(int var1, int var2);', '')

    # Cairo forward-declarations should not be a failure.
    self.assert_lint('typedef struct _cairo cairo_t;', '')
    self.assert_lint('typedef struct _cairo_surface cairo_surface_t;', '')
    self.assert_lint('typedef struct _cairo_scaled_font cairo_scaled_font_t;', '')

    # EFL forward-declarations should not be a failure.
    self.assert_lint('typedef struct _Ecore_Evas Ecore_Evas;', '')
    self.assert_lint('typedef struct _Ecore_Pipe Ecore_Pipe;', '')
    self.assert_lint('typedef struct _Eina_Rectangle Eina_Rectangle;', '')
    self.assert_lint('typedef struct _Evas_Object Evas_Object;', '')
    self.assert_lint('typedef struct _Ewk_History_Item Ewk_History_Item;', '')

    # NPAPI functions that start with NPN_, NPP_ or NP_ are allowed.
    self.assert_lint('void NPN_Status(NPP, const char*)', '')
    self.assert_lint('NPError NPP_SetWindow(NPP instance, NPWindow *window)', '')
    self.assert_lint('NPObject* NP_Allocate(NPP, NPClass*)', '')

    # const_iterator is allowed as well.
    self.assert_lint('typedef VectorType::const_iterator const_iterator;', '')

    # vm_throw is allowed as well.
    self.assert_lint('int vm_throw;', '')

    # Bitfields.
    self.assert_lint('unsigned _fillRule : 1;',
                     '_fillRule' + name_underscore_error_message)

    # new operators in initialization.
    self.assert_lint('OwnPtr<uint32_t> variable(new uint32_t);', '')
    self.assert_lint('OwnPtr<uint32_t> variable(new (expr) uint32_t);', '')
    self.assert_lint('OwnPtr<uint32_t> under_score(new uint32_t);',
                     'under_score' + name_underscore_error_message)

    # Conversion operator declaration.
    self.assert_lint('operator int64_t();', '')
def test_parameter_names(self):
    """Parameter names that repeat the function or type name add no
    information and should be flagged in declarations (not definitions)."""
    # Leave meaningless variable names out of function declarations.
    meaningless_variable_name_error_message = 'The parameter name "%s" adds no information, so it should be removed. [readability/parameter_name] [5]'
    parameter_error_rules = ('-',
                             '+readability/parameter_name')
    # No variable name, so no error.
    self.assertEqual('',
                     self.perform_lint('void func(int);', 'test.cpp', parameter_error_rules))
    # Verify that copying the name of the set function causes the error (with some odd casing).
    self.assertEqual(meaningless_variable_name_error_message % 'itemCount',
                     self.perform_lint('void setItemCount(size_t itemCount);', 'test.cpp', parameter_error_rules))
    self.assertEqual(meaningless_variable_name_error_message % 'abcCount',
                     self.perform_lint('void setABCCount(size_t abcCount);', 'test.cpp', parameter_error_rules))
    # Verify that copying a type name will trigger the warning (even if the type is a template parameter).
    self.assertEqual(meaningless_variable_name_error_message % 'context',
                     self.perform_lint('void funct(PassRefPtr<ScriptExecutionContext> context);', 'test.cpp', parameter_error_rules))
    # Verify that acronyms as variable names trigger the error (for both set functions and type names).
    self.assertEqual(meaningless_variable_name_error_message % 'ec',
                     self.perform_lint('void setExceptionCode(int ec);', 'test.cpp', parameter_error_rules))
    self.assertEqual(meaningless_variable_name_error_message % 'ec',
                     self.perform_lint('void funct(ExceptionCode ec);', 'test.cpp', parameter_error_rules))
    # 'object' alone, appended, or as part of an acronym is meaningless.
    self.assertEqual(meaningless_variable_name_error_message % 'object',
                     self.perform_lint('void funct(RenderView object);', 'test.cpp', parameter_error_rules))
    self.assertEqual(meaningless_variable_name_error_message % 'viewObject',
                     self.perform_lint('void funct(RenderView viewObject);', 'test.cpp', parameter_error_rules))
    self.assertEqual(meaningless_variable_name_error_message % 'rvo',
                     self.perform_lint('void funct(RenderView rvo);', 'test.cpp', parameter_error_rules))
    # Check that r, g, b, and a are allowed.
    self.assertEqual('',
                     self.perform_lint('void setRGBAValues(int r, int g, int b, int a);', 'test.cpp', parameter_error_rules))
    # Verify that a simple substring match isn't done which would cause false positives.
    self.assertEqual('',
                     self.perform_lint('void setNateLateCount(size_t elate);', 'test.cpp', parameter_error_rules))
    self.assertEqual('',
                     self.perform_lint('void funct(NateLate elate);', 'test.cpp', parameter_error_rules))
    # Don't generate warnings for function definitions (only declarations).
    self.assertEqual('',
                     self.perform_lint('void funct(PassRefPtr<ScriptExecutionContext> context)\n'
                                       '{\n'
                                       '}\n', 'test.cpp', parameter_error_rules))
def test_comments(self):
    """Comments starting a line are fine; trailing comments are also checked."""
    # Comments at the start of a line (indented or not) are accepted.
    for line in ('// comment', ' // comment'):
        self.assert_lint(line, '')
    # A trailing end-of-line comment after code triggers the spacing check.
    self.assert_lint('} // namespace WebCore',
                     'One space before end of line comments'
                     ' [whitespace/comments] [5]')
def test_redundant_virtual(self):
    """'virtual' combined with 'override' or 'final' flags the redundant 'virtual'."""
    for specifier in ('override', 'final'):
        expected = ('"virtual" is redundant since function is already declared as "%s"'
                    ' [readability/inheritance] [4]' % specifier)
        # Both a plain declaration and a definition split across lines.
        self.assert_lint('virtual void fooMethod() %s;' % specifier, expected)
        self.assert_lint('virtual void fooMethod(\n) %s {}' % specifier, expected)
def test_redundant_override(self):
    """'override' combined with 'final' (either order) flags the redundant 'override'."""
    expected = ('"override" is redundant since function is already declared as "final"'
                ' [readability/inheritance] [4]')
    for declaration in ('void fooMethod() override final;',
                        'void fooMethod(\n) override final {}',
                        'void fooMethod() final override;',
                        'void fooMethod(\n) final override {}'):
        self.assert_lint(declaration, expected)
def test_webkit_export_check(self):
    """WEBKIT_EXPORT may only appear in chromium public/tests headers, on
    declarations without a body, and never on pure virtual functions."""
    webkit_export_error_rules = ('-',
                                 '+readability/webkit_export')
    # Allowed: declarations inside the chromium public or tests headers.
    self.assertEqual('',
                     self.perform_lint('WEBKIT_EXPORT int foo();\n',
                                       'WebKit/chromium/public/test.h',
                                       webkit_export_error_rules))
    self.assertEqual('',
                     self.perform_lint('WEBKIT_EXPORT int foo();\n',
                                       'WebKit/chromium/tests/test.h',
                                       webkit_export_error_rules))
    # Not allowed in an implementation file, even under public/.
    self.assertEqual('WEBKIT_EXPORT should only be used in header files. [readability/webkit_export] [5]',
                     self.perform_lint('WEBKIT_EXPORT int foo();\n',
                                       'WebKit/chromium/public/test.cpp',
                                       webkit_export_error_rules))
    # Not allowed in headers outside public/ or tests/.
    self.assertEqual('WEBKIT_EXPORT should only appear in the chromium public (or tests) directory. [readability/webkit_export] [5]',
                     self.perform_lint('WEBKIT_EXPORT int foo();\n',
                                       'WebKit/chromium/src/test.h',
                                       webkit_export_error_rules))
    # Not allowed on functions with an inline body (single- or multi-line).
    self.assertEqual('WEBKIT_EXPORT should not be used on a function with a body. [readability/webkit_export] [5]',
                     self.perform_lint('WEBKIT_EXPORT int foo() { }\n',
                                       'WebKit/chromium/public/test.h',
                                       webkit_export_error_rules))
    self.assertEqual('WEBKIT_EXPORT should not be used on a function with a body. [readability/webkit_export] [5]',
                     self.perform_lint('WEBKIT_EXPORT inline int foo()\n'
                                       '{\n'
                                       '}\n',
                                       'WebKit/chromium/public/test.h',
                                       webkit_export_error_rules))
    # Not allowed on pure virtual functions, even when the declaration
    # spans multiple lines.
    self.assertEqual('WEBKIT_EXPORT should not be used with a pure virtual function. [readability/webkit_export] [5]',
                     self.perform_lint('{}\n'
                                       'WEBKIT_EXPORT\n'
                                       'virtual\n'
                                       'int\n'
                                       'foo() = 0;\n',
                                       'WebKit/chromium/public/test.h',
                                       webkit_export_error_rules))
    # Outside the chromium directories the checks do not apply at all.
    self.assertEqual('',
                     self.perform_lint('{}\n'
                                       'WEBKIT_EXPORT\n'
                                       'virtual\n'
                                       'int\n'
                                       'foo() = 0;\n',
                                       'test.h',
                                       webkit_export_error_rules))
def test_other(self):
    """Placeholder for additional checker behaviors not yet covered."""
    # FIXME: Implement this.
    pass
class CppCheckerTest(unittest.TestCase):

    """Unit tests for the CppChecker class itself."""

    def mock_handle_style_error(self):
        pass

    def _checker(self):
        # One checker built with a fixed, representative argument set.
        return CppChecker("foo", "h", self.mock_handle_style_error, 3)

    def test_init(self):
        """The constructor must store all four arguments unchanged."""
        checker = self._checker()
        self.assertEqual(checker.file_extension, "h")
        self.assertEqual(checker.file_path, "foo")
        self.assertEqual(checker.handle_style_error, self.mock_handle_style_error)
        self.assertEqual(checker.min_confidence, 3)

    def test_eq(self):
        """__eq__ holds exactly when all constructor arguments match."""
        # == invokes __eq__.
        self.assertTrue(self._checker() == self._checker())

        def mock_handle_style_error2(self):
            pass

        # Varying any single constructor argument must break equality.
        base = CppChecker("foo", "h", self.mock_handle_style_error, 3)
        for other in (CppChecker("bar", "h", self.mock_handle_style_error, 3),
                      CppChecker("foo", "c", self.mock_handle_style_error, 3),
                      CppChecker("foo", "h", mock_handle_style_error2, 3),
                      CppChecker("foo", "h", self.mock_handle_style_error, 4)):
            self.assertFalse(base == other)

    def test_ne(self):
        """__ne__ is defined and consistent with __eq__."""
        # != calls __ne__.  By default, __ne__ always returns true on
        # different objects, so checking the distinguishing case is enough
        # to verify that the class defines __ne__ itself.
        self.assertFalse(self._checker() != self._checker())
|
licco/zipline | refs/heads/master | tests/test_execution_styles.py | 3 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from six.moves import range
from nose_parameterized import parameterized
from zipline.finance.execution import (
LimitOrder,
MarketOrder,
StopLimitOrder,
StopOrder,
)
from zipline.utils.test_utils import(
setup_logger,
teardown_logger,
)
class ExecutionStyleTestCase(TestCase):
    """
    Tests for zipline ExecutionStyle classes.
    """

    # Small offset used to probe values just inside/outside the rounding
    # boundaries below.
    epsilon = .000001

    # Input, expected on limit buy/stop sell, expected on limit sell/stop buy.
    EXPECTED_PRICE_ROUNDING = [
        (0.00, 0.00, 0.00),
        (0.0005, 0.00, 0.00),
        (1.0005, 1.00, 1.00),  # Lowest value to round down on sell.
        (1.0005 + epsilon, 1.00, 1.01),
        (1.0095 - epsilon, 1.0, 1.01),
        (1.0095, 1.01, 1.01),  # Highest value to round up on buy.
        (0.01, 0.01, 0.01)
    ]

    # Test that the same rounding behavior is maintained if we add between 1
    # and 10 to all values, because floating point math is made of lies.
    EXPECTED_PRICE_ROUNDING += [
        (x + delta, y + delta, z + delta)
        for (x, y, z) in EXPECTED_PRICE_ROUNDING
        for delta in range(1, 10)
    ]

    # Prices that every execution style must reject with ValueError.
    INVALID_PRICES = [(-1,), (-1.0,), (0 - epsilon,)]

    def setUp(self):
        setup_logger(self)

    def tearDown(self):
        teardown_logger(self)

    @parameterized.expand(INVALID_PRICES)
    def test_invalid_prices(self, price):
        """
        Test that execution styles throw appropriate exceptions upon receipt
        of an invalid price field.
        """
        with self.assertRaises(ValueError):
            LimitOrder(price)

        with self.assertRaises(ValueError):
            StopOrder(price)

        # A StopLimitOrder is invalid if either price (or both) is invalid.
        for lmt, stp in [(price, 1), (1, price), (price, price)]:
            with self.assertRaises(ValueError):
                StopLimitOrder(lmt, stp)

    def test_market_order_prices(self):
        """
        Basic unit tests for the MarketOrder class.
        """
        style = MarketOrder()

        # Market orders carry neither a limit nor a stop price.
        self.assertEqual(style.get_limit_price(True), None)
        self.assertEqual(style.get_limit_price(False), None)

        self.assertEqual(style.get_stop_price(True), None)
        self.assertEqual(style.get_stop_price(False), None)

    @parameterized.expand(EXPECTED_PRICE_ROUNDING)
    def test_limit_order_prices(self,
                                price,
                                expected_limit_buy_or_stop_sell,
                                expected_limit_sell_or_stop_buy):
        """
        Test price getters for the LimitOrder class.
        """
        style = LimitOrder(price)

        # get_limit_price(is_buy): buys round down, sells round up.
        self.assertEqual(expected_limit_buy_or_stop_sell,
                         style.get_limit_price(True))
        self.assertEqual(expected_limit_sell_or_stop_buy,
                         style.get_limit_price(False))

        self.assertEqual(None, style.get_stop_price(True))
        self.assertEqual(None, style.get_stop_price(False))

    @parameterized.expand(EXPECTED_PRICE_ROUNDING)
    def test_stop_order_prices(self,
                               price,
                               expected_limit_buy_or_stop_sell,
                               expected_limit_sell_or_stop_buy):
        """
        Test price getters for StopOrder class. Note that the expected rounding
        direction for stop prices is the reverse of that for limit prices.
        """
        style = StopOrder(price)

        self.assertEqual(None, style.get_limit_price(False))
        self.assertEqual(None, style.get_limit_price(True))

        self.assertEqual(expected_limit_buy_or_stop_sell,
                         style.get_stop_price(False))
        self.assertEqual(expected_limit_sell_or_stop_buy,
                         style.get_stop_price(True))

    @parameterized.expand(EXPECTED_PRICE_ROUNDING)
    def test_stop_limit_order_prices(self,
                                     price,
                                     expected_limit_buy_or_stop_sell,
                                     expected_limit_sell_or_stop_buy):
        """
        Test price getters for StopLimitOrder class. Note that the expected
        rounding direction for stop prices is the reverse of that for limit
        prices.
        """
        # Stop price is offset by +1 so both getters can be checked at once.
        style = StopLimitOrder(price, price + 1)

        self.assertEqual(expected_limit_buy_or_stop_sell,
                         style.get_limit_price(True))
        self.assertEqual(expected_limit_sell_or_stop_buy,
                         style.get_limit_price(False))

        self.assertEqual(expected_limit_buy_or_stop_sell + 1,
                         style.get_stop_price(False))
        self.assertEqual(expected_limit_sell_or_stop_buy + 1,
                         style.get_stop_price(True))
|
shakamunyi/neutron-vrrp | refs/heads/master | neutron/tests/unit/ml2/drivers/arista/test_arista_l3_driver.py | 3 | # Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: Sukhdev Kapur, Arista Networks, Inc.
#
import mock
from oslo.config import cfg
from neutron.plugins.ml2.drivers.arista import arista_l3_driver as arista
from neutron.tests import base
def setup_arista_config(value='', vrf=False, mlag=False):
    """Prime the 'l3_arista' oslo.config group for the driver under test.

    Always overrides the primary EOS host and its username; optionally also
    enables VRF mode and/or the MLAG (secondary host) configuration.
    """
    group = "l3_arista"
    options = ['primary_l3_host', 'primary_l3_host_username']
    if vrf:
        options.append('use_vrf')
    if mlag:
        options.extend(['secondary_l3_host', 'mlag_config'])
    for option in options:
        cfg.CONF.set_override(option, value, group)
class AristaL3DriverTestCasesDefaultVrf(base.BaseTestCase):
    """Test cases to test the RPC between Arista Driver and EOS.

    Tests all methods used to send commands between Arista L3 Driver and EOS
    to program routing functions in Default VRF.
    """

    def setUp(self):
        super(AristaL3DriverTestCasesDefaultVrf, self).setUp()
        setup_arista_config('value')
        self.drv = arista.AristaL3Driver()
        # Replace the EOS connection with a mock so the exact CLI command
        # sequences can be asserted without a real switch.
        self.drv._servers = []
        self.drv._servers.append(mock.MagicMock())

    def test_no_exception_on_correct_configuration(self):
        self.assertIsNotNone(self.drv)

    def test_create_router_on_eos(self):
        router_name = 'test-router-1'
        route_domain = '123:123'

        self.drv.create_router_on_eos(router_name, route_domain,
                                      self.drv._servers[0])
        # In the default VRF no per-router configuration is emitted.
        cmds = ['enable', 'configure', 'exit']

        self.drv._servers[0].runCmds.assert_called_once_with(version=1,
                                                             cmds=cmds)

    def test_delete_router_from_eos(self):
        router_name = 'test-router-1'

        self.drv.delete_router_from_eos(router_name, self.drv._servers[0])
        cmds = ['enable', 'configure', 'exit']

        self.drv._servers[0].runCmds.assert_called_once_with(version=1,
                                                             cmds=cmds)

    def test_add_interface_to_router_on_eos(self):
        router_name = 'test-router-1'
        segment_id = '123'
        router_ip = '10.10.10.10'
        gw_ip = '10.10.10.1'
        mask = '255.255.255.0'

        self.drv.add_interface_to_router(segment_id, router_name, gw_ip,
                                         router_ip, mask, self.drv._servers[0])
        # Default-VRF mode programs the gateway address directly on the SVI.
        cmds = ['enable', 'configure', 'ip routing',
                'vlan %s' % segment_id, 'exit',
                'interface vlan %s' % segment_id,
                'ip address %s/%s' % (gw_ip, mask), 'exit']

        self.drv._servers[0].runCmds.assert_called_once_with(version=1,
                                                             cmds=cmds)

    def test_delete_interface_from_router_on_eos(self):
        router_name = 'test-router-1'
        segment_id = '123'

        self.drv.delete_interface_from_router(segment_id, router_name,
                                              self.drv._servers[0])
        cmds = ['enable', 'configure', 'no interface vlan %s' % segment_id,
                'exit']

        self.drv._servers[0].runCmds.assert_called_once_with(version=1,
                                                             cmds=cmds)
class AristaL3DriverTestCasesUsingVRFs(base.BaseTestCase):
    """Test cases to test the RPC between Arista Driver and EOS.

    Tests all methods used to send commands between Arista L3 Driver and EOS
    to program routing functions using multiple VRFs.
    Note that the configuration commands are different when VRFs are used.
    """

    def setUp(self):
        super(AristaL3DriverTestCasesUsingVRFs, self).setUp()
        # vrf=True makes the driver emit per-router VRF configuration.
        setup_arista_config('value', vrf=True)
        self.drv = arista.AristaL3Driver()
        # Replace the EOS connection with a mock so the exact CLI command
        # sequences can be asserted without a real switch.
        self.drv._servers = []
        self.drv._servers.append(mock.MagicMock())

    def test_no_exception_on_correct_configuration(self):
        self.assertIsNotNone(self.drv)

    def test_create_router_on_eos(self):
        # Each router gets its own VRF definition and route domain.
        max_vrfs = 5
        routers = ['testRouter-%s' % n for n in range(max_vrfs)]
        domains = ['10%s' % n for n in range(max_vrfs)]

        for (r, d) in zip(routers, domains):
            self.drv.create_router_on_eos(r, d, self.drv._servers[0])
            cmds = ['enable', 'configure',
                    'vrf definition %s' % r,
                    'rd %(rd)s:%(rd)s' % {'rd': d}, 'exit', 'exit']

            self.drv._servers[0].runCmds.assert_called_with(version=1,
                                                            cmds=cmds)

    def test_delete_router_from_eos(self):
        max_vrfs = 5
        routers = ['testRouter-%s' % n for n in range(max_vrfs)]

        for r in routers:
            self.drv.delete_router_from_eos(r, self.drv._servers[0])
            cmds = ['enable', 'configure', 'no vrf definition %s' % r,
                    'exit']

            self.drv._servers[0].runCmds.assert_called_with(version=1,
                                                            cmds=cmds)

    def test_add_interface_to_router_on_eos(self):
        router_name = 'test-router-1'
        segment_id = '123'
        router_ip = '10.10.10.10'
        gw_ip = '10.10.10.1'
        mask = '255.255.255.0'

        self.drv.add_interface_to_router(segment_id, router_name, gw_ip,
                                         router_ip, mask, self.drv._servers[0])
        # The SVI must be bound to the router's VRF ('vrf forwarding').
        cmds = ['enable', 'configure',
                'ip routing vrf %s' % router_name,
                'vlan %s' % segment_id, 'exit',
                'interface vlan %s' % segment_id,
                'vrf forwarding %s' % router_name,
                'ip address %s/%s' % (gw_ip, mask), 'exit']

        self.drv._servers[0].runCmds.assert_called_once_with(version=1,
                                                             cmds=cmds)

    def test_delete_interface_from_router_on_eos(self):
        router_name = 'test-router-1'
        segment_id = '123'

        self.drv.delete_interface_from_router(segment_id, router_name,
                                              self.drv._servers[0])
        cmds = ['enable', 'configure', 'no interface vlan %s' % segment_id,
                'exit']

        self.drv._servers[0].runCmds.assert_called_once_with(version=1,
                                                             cmds=cmds)
class AristaL3DriverTestCasesMlagConfig(base.BaseTestCase):
    """Test cases to test the RPC between Arista Driver and EOS.

    Tests all methods used to send commands between Arista L3 Driver and EOS
    to program routing functions in Default VRF using MLAG configuration.
    MLAG configuration means that the commands will be sent to both
    primary and secondary Arista Switches.
    """

    def setUp(self):
        super(AristaL3DriverTestCasesMlagConfig, self).setUp()
        setup_arista_config('value', mlag=True)
        self.drv = arista.AristaL3Driver()
        # Two mocked servers: in MLAG mode the driver mirrors configuration
        # to the primary and the secondary switch.
        self.drv._servers = []
        self.drv._servers.append(mock.MagicMock())
        self.drv._servers.append(mock.MagicMock())

    def test_no_exception_on_correct_configuration(self):
        self.assertIsNotNone(self.drv)

    def test_create_router_on_eos(self):
        router_name = 'test-router-1'
        route_domain = '123:123'
        router_mac = '00:11:22:33:44:55'

        for s in self.drv._servers:
            self.drv.create_router_on_eos(router_name, route_domain, s)
            # In MLAG mode a shared virtual-router MAC is programmed on
            # both switches instead of a VRF definition.
            cmds = ['enable', 'configure',
                    'ip virtual-router mac-address %s' % router_mac, 'exit']

            s.runCmds.assert_called_with(version=1, cmds=cmds)

    def test_delete_router_from_eos(self):
        router_name = 'test-router-1'

        for s in self.drv._servers:
            self.drv.delete_router_from_eos(router_name, s)
            cmds = ['enable', 'configure',
                    'no ip virtual-router mac-address', 'exit']

            s.runCmds.assert_called_once_with(version=1, cmds=cmds)

    def test_add_interface_to_router_on_eos(self):
        router_name = 'test-router-1'
        segment_id = '123'
        router_ip = '10.10.10.10'
        gw_ip = '10.10.10.1'
        mask = '255.255.255.0'

        for s in self.drv._servers:
            self.drv.add_interface_to_router(segment_id, router_name, gw_ip,
                                             router_ip, mask, s)
            # Each switch gets its own interface address plus the shared
            # virtual-router (gateway) address.
            cmds = ['enable', 'configure', 'ip routing',
                    'vlan %s' % segment_id, 'exit',
                    'interface vlan %s' % segment_id,
                    'ip address %s' % router_ip,
                    'ip virtual-router address %s' % gw_ip, 'exit']

            s.runCmds.assert_called_once_with(version=1, cmds=cmds)

    def test_delete_interface_from_router_on_eos(self):
        router_name = 'test-router-1'
        segment_id = '123'

        for s in self.drv._servers:
            self.drv.delete_interface_from_router(segment_id, router_name, s)
            cmds = ['enable', 'configure', 'no interface vlan %s' % segment_id,
                    'exit']

            s.runCmds.assert_called_once_with(version=1, cmds=cmds)
class AristaL3DriverTestCases_v4(base.BaseTestCase):
    """Exercises the Arista L3 driver interface API for IPv4 subnets.

    Routing is programmed in the default VRF; the EOS connection is
    replaced with a mock.
    """

    def setUp(self):
        super(AristaL3DriverTestCases_v4, self).setUp()
        setup_arista_config('value')
        self.drv = arista.AristaL3Driver()
        self.drv._servers = []
        self.drv._servers.append(mock.MagicMock())

    def test_no_exception_on_correct_configuration(self):
        self.assertIsNotNone(self.drv)

    def _router_payload(self, cidr, gateway_ip):
        # Router-interface dict in the shape the driver API expects.
        return {'name': 'test-router-1',
                'tenant_id': 'ten-a',
                'seg_id': '123',
                'cidr': "%s" % cidr,
                'gip': "%s" % gateway_ip,
                'ip_version': 4}

    def test_add_v4_interface_to_router(self):
        # Add a couple of IPv4 subnets to the router.
        for subnet in ('10.10.10.0/24', '10.11.11.0/24'):
            payload = self._router_payload(subnet, '10.10.10.1')
            self.assertFalse(self.drv.add_router_interface(None, payload))

    def test_delete_v4_interface_from_router(self):
        # Remove a couple of IPv4 subnets from the router.
        for subnet in ('10.10.10.0/24', '10.11.11.0/24'):
            payload = self._router_payload(subnet, '10.10.10.1')
            self.assertFalse(self.drv.remove_router_interface(None, payload))
class AristaL3DriverTestCases_v6(base.BaseTestCase):
    """Exercises the Arista L3 driver interface API for IPv6 subnets.

    Routing is programmed in the default VRF; the EOS connection is
    replaced with a mock.
    """

    def setUp(self):
        super(AristaL3DriverTestCases_v6, self).setUp()
        setup_arista_config('value')
        self.drv = arista.AristaL3Driver()
        self.drv._servers = []
        self.drv._servers.append(mock.MagicMock())

    def test_no_exception_on_correct_configuration(self):
        self.assertIsNotNone(self.drv)

    def _router_payload(self, cidr, gateway_ip):
        # Router-interface dict in the shape the driver API expects.
        return {'name': 'test-router-1',
                'tenant_id': 'ten-a',
                'seg_id': '123',
                'cidr': "%s" % cidr,
                'gip': "%s" % gateway_ip,
                'ip_version': 6}

    def test_add_v6_interface_to_router(self):
        # Add a couple of IPv6 subnets to the router.
        for subnet in ('3FFE::/16', '2001::/16'):
            payload = self._router_payload(subnet, '3FFE::1')
            self.assertFalse(self.drv.add_router_interface(None, payload))

    def test_delete_v6_interface_from_router(self):
        # Remove a couple of IPv6 subnets from the router.
        for subnet in ('3FFE::/16', '2001::/16'):
            payload = self._router_payload(subnet, '3FFE::1')
            self.assertFalse(self.drv.remove_router_interface(None, payload))
class AristaL3DriverTestCases_MLAG_v6(base.BaseTestCase):
    """Exercises the Arista L3 driver interface API for IPv6 subnets on
    MLAG'ed switches.

    MLAG mode configures a primary and a secondary switch, so two mocked
    server connections are installed.
    """

    def setUp(self):
        super(AristaL3DriverTestCases_MLAG_v6, self).setUp()
        setup_arista_config('value', mlag=True)
        self.drv = arista.AristaL3Driver()
        self.drv._servers = []
        self.drv._servers.append(mock.MagicMock())
        self.drv._servers.append(mock.MagicMock())

    def test_no_exception_on_correct_configuration(self):
        self.assertIsNotNone(self.drv)

    def _router_payload(self, cidr, gateway_ip):
        # Router-interface dict in the shape the driver API expects.
        return {'name': 'test-router-1',
                'tenant_id': 'ten-a',
                'seg_id': '123',
                'cidr': "%s" % cidr,
                'gip': "%s" % gateway_ip,
                'ip_version': 6}

    def test_add_v6_interface_to_router(self):
        # Add a couple of IPv6 subnets to the router.
        for subnet in ('3FFE::/16', '2001::/16'):
            payload = self._router_payload(subnet, '3FFE::1')
            self.assertFalse(self.drv.add_router_interface(None, payload))

    def test_delete_v6_interface_from_router(self):
        # Remove a couple of IPv6 subnets from the router.
        for subnet in ('3FFE::/16', '2001::/16'):
            payload = self._router_payload(subnet, '3FFE::1')
            self.assertFalse(self.drv.remove_router_interface(None, payload))
|
sjperkins/tensorflow | refs/heads/master | tensorflow/python/kernel_tests/scatter_nd_ops_test.py | 48 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter_nd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _AsType(v, vtype):
return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)
def _FlatInnerDims(tensor, ndims=2):
shape = list(tensor.shape)
return tensor.reshape([
functools.reduce(lambda x, y: x * y, shape[:-ndims + 1], 1)
] + shape[-ndims + 1:])
def _FlatOuterDims(tensor, ndims=2):
shape = list(tensor.shape)
return tensor.reshape(shape[:ndims - 1] + [
functools.reduce(lambda x, y: x * y, shape[ndims - 1:], 1)
])
def _NumpyScatterNd(ref, indices, updates, op):
  """NumPy reference implementation of scatter_nd-style updates.

  Applies `op(previous_value, update)` at every location addressed by
  `indices`. Mutates `ref` in place (the flattened output is a reshape view
  of `ref`) and also returns the result reshaped to `ref`'s shape.
  """
  index_depth = indices.shape[-1]
  n_updates = indices.size // index_depth
  # Number of elements in each updated slice (product of the trailing,
  # non-indexed dimensions of `ref`).
  slice_size = 1
  for axis in range(index_depth, len(ref.shape)):
    slice_size *= ref.shape[axis]
  idx_rows = _FlatInnerDims(indices)
  upd_rows = updates.reshape((n_updates, slice_size))
  out = _FlatOuterDims(ref, index_depth + 1)
  for row, key in enumerate(idx_rows):
    key = tuple(key)
    out[key] = op(out[key], upd_rows[row])
  return out.reshape(ref.shape)
def _NumpyUpdate(ref, indices, updates):
  """Reference scatter_nd_update: overwrite the previous value."""
  def _replace(_, new):
    return new
  return _NumpyScatterNd(ref, indices, updates, _replace)
def _NumpyAdd(ref, indices, updates):
  """Reference scatter_nd_add: accumulate by addition."""
  def _add(old, new):
    return old + new
  return _NumpyScatterNd(ref, indices, updates, _add)
def _NumpySub(ref, indices, updates):
  """Reference scatter_nd_sub: accumulate by subtraction."""
  def _sub(old, new):
    return old - new
  return _NumpyScatterNd(ref, indices, updates, _sub)
def _NumpyMul(ref, indices, updates):
  """Reference scatter_nd_mul: accumulate by multiplication."""
  def _mul(old, new):
    return old * new
  return _NumpyScatterNd(ref, indices, updates, _mul)
def _NumpyDiv(ref, indices, updates):
  """Reference scatter_nd_div: accumulate by division."""
  def _div(old, new):
    return old / new
  return _NumpyScatterNd(ref, indices, updates, _div)
class ScatterNdTest(test.TestCase):
  """Functional tests for scatter_nd and the scatter_nd_* state ops."""

  def _VariableRankTest(self,
                        np_scatter,
                        tf_scatter,
                        vtype,
                        itype,
                        repeat_indices=False):
    """Compare `tf_scatter` against the in-place NumPy reference `np_scatter`
    over several ref/indices shape combinations for the given dtypes."""
    np.random.seed(8)
    ref_shapes = [(3, 6), (3, 6), (3, 6, 9), (3, 6, 9), (3, 6, 9), (3, 6, 9)]
    indices_shapes = [(2,), (2, 2), (2,), (2, 2), (2, 3), (2, 3, 3)]
    with self.test_session(use_gpu=True):
      for ref_shape, indices_shape in zip(ref_shapes, indices_shapes):
        num_updates = indices_shape[0]
        ixdim = indices_shape[-1]
        # Enumerate every valid index tuple, then sample `num_updates` of them.
        indexable_area_shape = ()
        for i in range(ixdim):
          indexable_area_shape += (ref_shape[i],)
        all_indices = [
            list(coord)
            for coord, _ in np.ndenumerate(
                np.empty(indexable_area_shape, vtype))
        ]
        np.random.shuffle(all_indices)
        indices = np.array(all_indices[:num_updates])
        if num_updates > 1 and repeat_indices:
          # Replace the second half of the indices with duplicates of the
          # first half to exercise accumulation on repeated indices.
          indices = indices[:num_updates // 2]
          for _ in range(num_updates - num_updates // 2):
            indices = np.append(
                indices, [indices[np.random.randint(num_updates // 2)]], axis=0)
          np.random.shuffle(indices)
        indices = _AsType(indices[:num_updates], itype)
        updates_shape = (num_updates,)
        for i in range(ixdim, len(ref_shape)):
          updates_shape += (ref_shape[i],)
        updates = _AsType(np.random.randn(*(updates_shape)), vtype)
        ref = _AsType(np.random.randn(*(ref_shape)), vtype)
        # Scatter via numpy
        new = ref.copy()
        np_scatter(new, indices, updates)
        # Scatter via tensorflow
        ref_var = variables.Variable(ref)
        ref_var.initializer.run()
        tf_scatter(ref_var, indices, updates).eval()
        # Compare
        self.assertAllClose(new, ref_var.eval())

  def _VariableRankTests(self, np_scatter, tf_scatter):
    """Run _VariableRankTest over all supported value/index dtype pairs."""
    for vtype in (np.float32, np.float64):
      for itype in (np.int32, np.int64):
        self._VariableRankTest(np_scatter, tf_scatter, vtype, itype)

  def testSimple(self):
    # Rank-1 ref, scalar elements addressed by 1-element index vectors.
    indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
    updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
    ref = variables.Variable([0, 0, 0, 0, 0, 0, 0, 0], dtype=dtypes.float32)
    expected = np.array([0, 11, 0, 10, 9, 0, 0, 12])
    scatter = state_ops.scatter_nd_update(ref, indices, updates)
    init = variables.global_variables_initializer()
    with self.test_session(use_gpu=True) as sess:
      sess.run(init)
      result = sess.run(scatter)
      self.assertAllClose(result, expected)

  def testSimple2(self):
    # Rank-2 ref, scalar elements addressed by full (row, col) indices.
    indices = constant_op.constant([[1, 0], [1, 1]], dtype=dtypes.int32)
    updates = constant_op.constant([11., 12.], dtype=dtypes.float32)
    ref = variables.Variable(
        [[0., 0.], [0., 0.], [0., 0.]], dtype=dtypes.float32)
    expected = np.array([[0., 0.], [11., 12.], [0., 0.]])
    scatter = state_ops.scatter_nd_update(ref, indices, updates)
    init = variables.global_variables_initializer()
    with self.test_session(use_gpu=True) as sess:
      sess.run(init)
      result = sess.run(scatter)
      self.assertAllClose(result, expected)

  def testSimple3(self):
    # Rank-2 ref, whole-row update addressed by a 1-element index vector.
    indices = constant_op.constant([[1]], dtype=dtypes.int32)
    updates = constant_op.constant([[11., 12.]], dtype=dtypes.float32)
    ref = variables.Variable(
        [[0., 0.], [0., 0.], [0., 0.]], dtype=dtypes.float32)
    expected = np.array([[0., 0.], [11., 12.], [0., 0.]])
    scatter = state_ops.scatter_nd_update(ref, indices, updates)
    init = variables.global_variables_initializer()
    with self.test_session(use_gpu=True) as sess:
      sess.run(init)
      result = sess.run(scatter)
      self.assertAllClose(result, expected)

  def testVariableRankUpdate(self):
    self._VariableRankTests(_NumpyUpdate, state_ops.scatter_nd_update)

  def testVariableRankAdd(self):
    self._VariableRankTests(_NumpyAdd, state_ops.scatter_nd_add)

  def testVariableRankSub(self):
    self._VariableRankTests(_NumpySub, state_ops.scatter_nd_sub)

  # TODO(simister): Re-enable once binary size increase due to
  # scatter_nd ops is under control.
  # def testVariableRankMul(self):
  #   self._VariableRankTests(_NumpyMul, tf.scatter_nd_mul)
  # def testVariableRankDiv(self):
  #   self._VariableRankTests(_NumpyDiv, tf.scatter_nd_div)

  def _ScatterRepeatIndicesTest(self, np_scatter, tf_scatter):
    """Like _VariableRankTests, but with deliberately repeated indices."""
    for vtype in (np.float32, np.float64):
      for itype in (np.int32, np.int64):
        self._VariableRankTest(
            np_scatter, tf_scatter, vtype, itype, repeat_indices=True)

  def testScatterRepeatIndices(self):
    """This tests scatter_add using indices that repeat."""
    self._ScatterRepeatIndicesTest(_NumpyAdd, state_ops.scatter_nd_add)
    self._ScatterRepeatIndicesTest(_NumpySub, state_ops.scatter_nd_sub)
    # TODO(simister): Re-enable once binary size increase due to
    # extra templating is back under control.
    # self._ScatterRepeatIndicesTest(_NumpyMul, tf.scatter_nd_mul)
    # self._ScatterRepeatIndicesTest(_NumpyDiv, tf.scatter_nd_div)

  # TODO(simister): Re-enable once binary size increase due to
  # extra templating is back under control and this op is re-enabled
  # def testBooleanScatterUpdate(self):
  #   with self.test_session(use_gpu=False) as session:
  #     var = tf.Variable([True, False])
  #     update0 = tf.scatter_nd_update(var, [[1]], [True])
  #     update1 = tf.scatter_nd_update(
  #         var, tf.constant(
  #             [[0]], dtype=tf.int64), [False])
  #     var.initializer.run()
  #     session.run([update0, update1])
  #     self.assertAllEqual([False, True], var.eval())

  def testScatterOutOfRangeCpu(self):
    """On CPU, out-of-range indices must raise an op error."""
    # TODO(simister): Re-enable once binary size increase due to
    # scatter_nd ops is under control.
    # tf.scatter_nd_mul, tf.scatter_nd_div,
    for op in (state_ops.scatter_nd_add, state_ops.scatter_nd_sub,
               state_ops.scatter_nd_update):
      params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
      updates = np.array([-3, -4, -5]).astype(np.float32)
      with self.test_session(use_gpu=False):
        ref = variables.Variable(params)
        ref.initializer.run()
        # Indices all in range, no problem.
        indices = np.array([[2], [0], [5]])
        op(ref, indices, updates).eval()
        # Test some out of range errors.
        indices = np.array([[-1], [0], [5]])
        with self.assertRaisesOpError(
            r"Invalid indices: \[0,0\] = \[-1\] is not in \[0, 6\)"):
          op(ref, indices, updates).eval()
        indices = np.array([[2], [0], [6]])
        with self.assertRaisesOpError(
            r"Invalid indices: \[2,0\] = \[6\] is not in \[0, 6\)"):
          op(ref, indices, updates).eval()

  def testRank3ValidShape(self):
    # Static shape inference must produce the full output shape.
    indices = array_ops.zeros([2, 2, 2], dtypes.int32)
    updates = array_ops.zeros([2, 2, 2], dtypes.int32)
    shape = np.array([2, 2, 2])
    self.assertAllEqual(
        array_ops.scatter_nd(indices, updates, shape).get_shape().as_list(),
        shape)
    ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
    self.assertAllEqual(
        state_ops.scatter_nd_update(ref, indices,
                                    updates).get_shape().as_list(), shape)

  def testExtraIndicesDimensions(self):
    # Indices may carry extra leading "batch" dimensions.
    indices = array_ops.zeros([1, 1, 2], dtypes.int32)
    updates = array_ops.zeros([1, 1], dtypes.int32)
    shape = np.array([2, 2])
    scatter = array_ops.scatter_nd(indices, updates, shape)
    self.assertAllEqual(scatter.get_shape().as_list(), shape)
    expected_result = np.zeros([2, 2], dtype=np.int32)
    with self.test_session():
      self.assertAllEqual(expected_result, scatter.eval())
    ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
    scatter_update = state_ops.scatter_nd_update(ref, indices, updates)
    self.assertAllEqual(scatter_update.get_shape().as_list(), shape)
    with self.test_session():
      ref.initializer.run()
      self.assertAllEqual(expected_result, scatter_update.eval())

  def testUndefinedIndicesShape(self):
    # Graph construction must not fail when the indices shape is unknown.
    indices = array_ops.placeholder(dtypes.int32, shape=None)
    updates = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
    shape = constant_op.constant([2, 2, 2], dtypes.int32)
    array_ops.scatter_nd(indices, updates, shape)

  def testUndefinedUpdatesShape(self):
    # Graph construction must not fail when the updates shape is unknown.
    indices = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
    updates = array_ops.placeholder(dtypes.int32, shape=None)
    shape = constant_op.constant([2, 2, 2], dtypes.int32)
    array_ops.scatter_nd(indices, updates, shape)

  def testUndefinedOutputShape(self):
    # Graph construction must not fail when the output shape is unknown.
    indices = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
    updates = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
    shape = array_ops.placeholder(dtypes.int32, shape=[None])
    array_ops.scatter_nd(indices, updates, shape)

  def testEmptyOutputShape1(self):
    # Statically-known empty output with non-empty updates is an error.
    indices = array_ops.zeros([2, 2, 2], dtypes.int32)
    updates = array_ops.zeros([2, 2, 2], dtypes.int32)
    shape = constant_op.constant([0, 3, 2], dtypes.int32)
    with self.assertRaisesWithPredicateMatch(
        ValueError, "Indices and updates specified for empty output shape"):
      array_ops.scatter_nd(indices, updates, shape)

  def testEmptyOutputShape2(self):
    # NOTE(review): same condition as testEmptyOutputShape1 but with shapes
    # only known at run time; this currently does not assert an error —
    # confirm whether a runtime failure is expected here.
    indices = array_ops.placeholder(dtypes.int32, shape=None)
    updates = array_ops.placeholder(dtypes.int32, shape=None)
    shape = constant_op.constant([0, 3, 2], dtypes.int32)
    with self.test_session():
      array_ops.scatter_nd(indices, updates, shape).eval(feed_dict={
          indices: np.zeros(
              [2, 2, 2], dtype=np.int32),
          updates: np.zeros(
              [2, 2, 2], dtype=np.int32)
      })

  def testEmptyOutputShape3(self):
    # Fully empty indices/updates/output is valid and yields an empty result.
    indices = array_ops.zeros([0], dtypes.int32)
    updates = array_ops.zeros([0], dtypes.int32)
    shape = constant_op.constant([0], dtypes.int32)
    scatter = array_ops.scatter_nd(indices, updates, shape)
    with self.test_session():
      self.assertEqual(scatter.eval().size, 0)

  def testRank3InvalidShape1(self):
    # Mismatch between the outer dims of indices and updates.
    indices = array_ops.zeros([3, 2, 2], dtypes.int32)
    updates = array_ops.zeros([2, 2, 2], dtypes.int32)
    shape = np.array([2, 2, 2])
    with self.assertRaisesWithPredicateMatch(
        ValueError, "The outer \\d+ dimensions of indices\\.shape="):
      array_ops.scatter_nd(indices, updates, shape)
    ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
    with self.assertRaisesWithPredicateMatch(
        ValueError, "The outer \\d+ dimensions of indices\\.shape="):
      state_ops.scatter_nd_update(ref, indices, updates)

  def testRank3InvalidShape2(self):
    # Mismatch between the inner dims of updates and the output slices.
    indices = array_ops.zeros([2, 2, 1], dtypes.int32)
    updates = array_ops.zeros([2, 2], dtypes.int32)
    shape = np.array([2, 2, 2])
    with self.assertRaisesWithPredicateMatch(
        ValueError, "The inner \\d+ dimensions of output\\.shape="):
      array_ops.scatter_nd(indices, updates, shape)
    ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
    with self.assertRaisesWithPredicateMatch(
        ValueError, "The inner \\d+ dimensions of ref\\.shape="):
      state_ops.scatter_nd_update(ref, indices, updates)

  def testGradientsRank2ElementUpdate(self):
    # d(output)/d(updates) gathers the output gradient at the scatter indices.
    indices = constant_op.constant([[0, 0], [1, 1]], dtype=dtypes.int32)
    updates = constant_op.constant([1, 4], dtype=dtypes.float64)
    shape = constant_op.constant([2, 2], dtype=dtypes.int32)
    outputs = array_ops.scatter_nd(indices, updates, shape)
    grad_vals = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float64)
    grads = gradients_impl.gradients([outputs], [updates], [grad_vals])[0]
    expected_grads = np.array([1, 4], dtype=np.float64)
    with self.test_session():
      self.assertAllEqual(expected_grads, grads.eval())

  def testGradientsRank2SliceUpdate(self):
    indices = constant_op.constant([[1], [0]], dtype=dtypes.int32)
    updates = constant_op.constant([[3, 4], [1, 2]], dtype=dtypes.float64)
    shape = constant_op.constant([2, 2], dtype=dtypes.int32)
    outputs = array_ops.scatter_nd(indices, updates, shape)
    grad_vals = constant_op.constant([[3, 4], [1, 2]], dtype=dtypes.float64)
    grads = gradients_impl.gradients([outputs], [updates], [grad_vals])[0]
    expected_grads = np.array([[1, 2], [3, 4]], dtype=np.float64)
    with self.test_session():
      self.assertAllEqual(expected_grads, grads.eval())

  def testGradientsRank3SliceUpdate(self):
    indices = constant_op.constant(
        [[[0, 1], [1, 0]], [[0, 0], [1, 1]]], dtype=dtypes.int32)
    updates = constant_op.constant(
        [[[5, 7], [2, 4]], [[1, 3], [6, 8]]], dtype=dtypes.float64)
    shape = constant_op.constant([2, 2, 2], dtype=dtypes.int32)
    outputs = array_ops.scatter_nd(indices, updates, shape)
    grad_vals = constant_op.constant(
        [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=dtypes.float64)
    grads = gradients_impl.gradients([outputs], [updates], [grad_vals])[0]
    expected_grads = np.array(
        [[[3, 4], [5, 6]], [[1, 2], [7, 8]]], dtype=np.float64)
    with self.test_session():
      self.assertAllEqual(expected_grads, grads.eval())

  def testConcurrentUpdates(self):
    # Many additions to the same location must all be accumulated.
    num_updates = 10000
    update_values = np.random.rand(num_updates)
    ref = variables.Variable(np.zeros([2, 2]), dtype=dtypes.float64)
    indices = constant_op.constant([[0, 1]] * num_updates, dtype=dtypes.int32)
    updates = constant_op.constant(update_values, dtype=dtypes.float64)
    expected_result = np.zeros([2, 2], dtype=np.float64)
    expected_result[0, 1] = np.sum(update_values)
    scatter = state_ops.scatter_nd_add(ref, indices, updates)
    init = variables.global_variables_initializer()
    with session.Session() as sess:
      sess.run(init)
      result = sess.run(scatter)
      assert np.allclose(result, expected_result)

  # TODO(fpmc): Re-enable this test when gpu_pip test actually runs on a GPU.
  def _disabledTestScatterOutOfRangeGpu(self):
    if not test.IsBuiltWithCuda():
      return
    # TODO(simister): Re-enable once binary size increase due to
    # scatter_nd ops is under control.
    # tf.scatter_nd_mul, tf.scatter_nd_div,
    for op in (state_ops.scatter_nd_add, state_ops.scatter_nd_sub,
               state_ops.scatter_nd_update):
      params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
      updates = np.array([-3, -4, -5]).astype(np.float32)
      # With GPU, the code ignores indices that are out of range.
      # We don't test the implementation; just test there's no failures.
      with self.test_session(force_gpu=True):
        ref = variables.Variable(params)
        ref.initializer.run()
        # Indices all in range, no problem.
        indices = np.array([2, 0, 5])
        op(ref, indices, updates).eval()
        # Indices out of range should not fail.
        indices = np.array([-1, 0, 5])
        op(ref, indices, updates).eval()
        indices = np.array([2, 0, 6])
        op(ref, indices, updates).eval()

  def testScatterNdRepatedIndicesAdd(self):
    # NOTE: "Repated" typo kept in the method name for test-name stability.
    indices = array_ops.zeros([100000, 1], dtypes.int32)
    values = np.random.randn(100000)
    shape = [1]
    with self.test_session():
      val = array_ops.scatter_nd(indices, values, shape).eval()
      self.assertAllClose([np.sum(values)], val)

  def testSmokeScatterNdBatch2DSliceDim2(self):
    with self.test_session():
      indices = array_ops.zeros([3, 5, 2], dtype=dtypes.int32)
      values = array_ops.zeros([3, 5, 7])
      shape = [4, 6, 7]
      array_ops.scatter_nd(indices, values, shape).eval()

  def testSmokeScatterNdBatch1DSliceDim2(self):
    with self.test_session():
      indices = array_ops.zeros([0, 2], dtype=dtypes.int32)
      values = array_ops.zeros([0, 7])
      shape = [4, 6, 7]
      array_ops.scatter_nd(indices, values, shape).eval()

  def testSmokeScatterNdBatch1DSliceDim3ShapeRank7(self):
    with self.test_session():
      indices = array_ops.zeros([1, 3], dtype=dtypes.int32)
      values = array_ops.zeros([1, 6, 7, 8, 9])
      shape = [3, 4, 5, 6, 7, 8, 9]
      array_ops.scatter_nd(indices, values, shape).eval()

  def testSmokeScatterNdBatch2DSliceDim3ShapeRank7(self):
    with self.test_session():
      indices = array_ops.zeros([1, 2, 3], dtype=dtypes.int32)
      values = array_ops.zeros([1, 2, 6, 7, 8, 9])
      shape = [3, 4, 5, 6, 7, 8, 9]
      array_ops.scatter_nd(indices, values, shape).eval()
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
|
nikoonia/gem5v | refs/heads/master | src/arch/x86/isa/insts/x87/control/save_and_restore_x87_environment.py | 26 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# Microcode for the x87 environment save/restore instructions.
# The '#'-prefixed lines below are comments *inside* the microcode assembly
# string, not Python comments: FLDENV/FNSTENV/FSTENV are listed only as
# placeholders and have no implementation yet.
microcode = '''
# FLDENV
# FNSTENV
# FSTENV
'''
|
fxtentacle/Cura | refs/heads/master | cura/PrintInformation.py | 4 | # Copyright (c) 2015 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from PyQt5.QtCore import QObject, QDateTime, QTimer, pyqtSignal, pyqtSlot, pyqtProperty
from UM.Application import Application
from UM.Settings.MachineSettings import MachineSettings
from UM.Resources import Resources
from UM.Scene.SceneNode import SceneNode
from UM.Qt.Duration import Duration
import math
## A class for processing and calculating minimum, current and maximum print time.
#
# This class contains all the logic relating to calculation and slicing for the
# time/quality slider concept. It is a rather tricky combination of event handling
# and state management. The logic behind this is as follows:
#
# - A scene change or setting change event happens.
#   We track what the source was of the change, either a scene change, a setting change, an active machine change or something else.
# - This triggers a new slice with the current settings - this is the "current settings pass".
# - When the slice is done, we update the current print time and material amount.
# - If the source of the slice was not a Setting change, we start the second slice pass, the "low quality settings pass". Otherwise we stop here.
# - When that is done, we update the minimum print time and start the final slice pass, the "high quality settings pass".
# - When the high quality pass is done, we update the maximum print time.
#
class PrintInformation(QObject):
    """Tracks print-time and material-usage estimates reported by the backend.

    NOTE(review): only the "current settings" figures are updated here; the
    multi-pass SlicePass/SliceReason machinery described in the comment above
    the class does not appear to be wired up in this file — confirm elsewhere.
    """
    ## Identifiers for the three slicing passes (see the class comment).
    class SlicePass:
        CurrentSettings = 1
        LowQualitySettings = 2
        HighQualitySettings = 3
    ## Identifiers for what triggered a re-slice.
    class SliceReason:
        SceneChanged = 1
        SettingChanged = 2
        ActiveMachineChanged = 3
        Other = 4
    def __init__(self, parent = None):
        super().__init__(parent)
        # Most recent print-time estimate; mutated in place on backend updates.
        self._current_print_time = Duration(None, self)
        # Estimated material usage; -1 until the first backend estimate arrives.
        self._material_amount = -1
        self._backend = Application.getInstance().getBackend()
        if self._backend:
            self._backend.printDurationMessage.connect(self._onPrintDurationMessage)
    currentPrintTimeChanged = pyqtSignal()
    ## Read-only property exposing the current print-time estimate to QML.
    @pyqtProperty(Duration, notify = currentPrintTimeChanged)
    def currentPrintTime(self):
        return self._current_print_time
    materialAmountChanged = pyqtSignal()
    ## Read-only property exposing the estimated material amount to QML.
    @pyqtProperty(float, notify = materialAmountChanged)
    def materialAmount(self):
        return self._material_amount
    ## Backend callback: `time` is the estimated duration, `amount` the
    #  material volume in mm^3 (converted below to filament length —
    #  presumably metres, given the /1000; verify units against the backend).
    def _onPrintDurationMessage(self, time, amount):
        #if self._slice_pass == self.SlicePass.CurrentSettings:
        self._current_print_time.setDuration(time)
        self.currentPrintTimeChanged.emit()
        # Material amount is sent as an amount of mm^3, so calculate length from that
        r = Application.getInstance().getMachineManager().getActiveProfile().getSettingValue("material_diameter") / 2
        self._material_amount = round((amount / (math.pi * r ** 2)) / 1000, 2)
        self.materialAmountChanged.emit()
|
dnjohnstone/hyperspy | refs/heads/RELEASE_next_minor | hyperspy/__init__.py | 4 | # -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import logging
# Package-level logger; submodules log through their own child loggers.
_logger = logging.getLogger(__name__)
from hyperspy import docstrings
# The package docstring is assembled at import time so the shared fragment
# (docstrings.START_HSPY) is maintained in a single place.
__doc__ = """
HyperSpy: a multi-dimensional data analysis package for Python
==============================================================
Documentation is available in the docstrings and online at
http://hyperspy.org/hyperspy-doc/current/index.html.
All public packages, functions and classes are in :mod:`~hyperspy.api`. All
other packages and modules are for internal consumption and should not be
needed for data analysis.
%s
More details in the :mod:`~hyperspy.api` docstring.
""" % docstrings.START_HSPY
from . import Release
# Only the high-level `api` module is part of the public interface.
__all__ = ["api"]
__version__ = Release.version
|
ryfeus/lambda-packs | refs/heads/master | Tensorflow_Pandas_Numpy/source3.6/tensorflow/contrib/linear_optimizer/python/ops/sdca_ops.py | 63 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Proximal stochastic dual coordinate ascent optimizer for linear models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from six.moves import range
from tensorflow.contrib.linear_optimizer.python.ops.sharded_mutable_dense_hashtable import ShardedMutableDenseHashTable
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework.ops import internal_convert_to_tensor
from tensorflow.python.framework.ops import name_scope
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_sdca_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables as var_ops
from tensorflow.python.ops.nn import sigmoid_cross_entropy_with_logits
from tensorflow.python.summary import summary
__all__ = ['SdcaModel']
# TODO(sibyl-Aix6ihai): add name_scope to appropriate methods.
class SdcaModel(object):
"""Stochastic dual coordinate ascent solver for linear models.
This class currently only supports a single machine (multi-threaded)
implementation. We expect the weights and duals to fit in a single machine.
Loss functions supported:
* Binary logistic loss
* Squared loss
* Hinge loss
* Smooth hinge loss
This class defines an optimizer API to train a linear model.
### Usage
```python
# Create a solver with the desired parameters.
lr = tf.contrib.linear_optimizer.SdcaModel(examples, variables, options)
min_op = lr.minimize()
opt_op = lr.update_weights(min_op)
predictions = lr.predictions(examples)
# Primal loss + L1 loss + L2 loss.
regularized_loss = lr.regularized_loss(examples)
# Primal loss only
unregularized_loss = lr.unregularized_loss(examples)
examples: {
sparse_features: list of SparseFeatureColumn.
dense_features: list of dense tensors of type float32.
example_labels: a tensor of type float32 and shape [Num examples]
example_weights: a tensor of type float32 and shape [Num examples]
example_ids: a tensor of type string and shape [Num examples]
}
variables: {
sparse_features_weights: list of tensors of shape [vocab size]
dense_features_weights: list of tensors of shape [dense_feature_dimension]
}
options: {
symmetric_l1_regularization: 0.0
symmetric_l2_regularization: 1.0
loss_type: "logistic_loss"
num_loss_partitions: 1 (Optional, with default value of 1. Number of
partitions of the global loss function, 1 means single machine solver,
and >1 when we have more than one optimizer working concurrently.)
num_table_shards: 1 (Optional, with default value of 1. Number of shards
of the internal state table, typically set to match the number of
parameter servers for large data sets.
}
```
In the training program you will just have to run the returned Op from
minimize().
```python
# Execute opt_op and train for num_steps.
for _ in range(num_steps):
opt_op.run()
# You can also check for convergence by calling
lr.approximate_duality_gap()
```
"""
  def __init__(self, examples, variables, options):
    """Create a new sdca optimizer.

    Args:
      examples: dict with example_labels/example_weights/example_ids tensors
        plus lists of sparse and dense features (see class docstring).
      variables: dict with the sparse and dense feature weight lists.
      options: dict with loss_type and the symmetric l1/l2 regularization
        strengths; may also carry num_loss_partitions / num_table_shards.

    Raises:
      ValueError: if any argument is missing or malformed, if the loss type
        is unsupported, or if a regularization strength is negative.
    """
    if not examples or not variables or not options:
      raise ValueError('examples, variables and options must all be specified.')
    supported_losses = ('logistic_loss', 'squared_loss', 'hinge_loss',
                        'smooth_hinge_loss')
    if options['loss_type'] not in supported_losses:
      raise ValueError('Unsupported loss_type: ', options['loss_type'])
    # Validate the structure of all three argument dicts up front.
    self._assertSpecified([
        'example_labels', 'example_weights', 'example_ids', 'sparse_features',
        'dense_features'
    ], examples)
    self._assertList(['sparse_features', 'dense_features'], examples)
    self._assertSpecified(['sparse_features_weights', 'dense_features_weights'],
                          variables)
    self._assertList(['sparse_features_weights', 'dense_features_weights'],
                     variables)
    self._assertSpecified([
        'loss_type', 'symmetric_l2_regularization',
        'symmetric_l1_regularization'
    ], options)
    for name in ['symmetric_l1_regularization', 'symmetric_l2_regularization']:
      value = options[name]
      if value < 0.0:
        raise ValueError('%s should be non-negative. Found (%f)' %
                         (name, value))
    self._examples = examples
    self._variables = variables
    self._options = options
    self._create_slots()
    # Per-example state (dual variables etc.) lives in a sharded hash table
    # keyed by the fingerprint of the example id.
    self._hashtable = ShardedMutableDenseHashTable(
        key_dtype=dtypes.int64,
        value_dtype=dtypes.float32,
        num_shards=self._num_table_shards(),
        default_value=[0.0, 0.0, 0.0, 0.0],
        # SdcaFprint never returns 0 or 1 for the low64 bits, so this is a safe
        # empty_key (that will never collide with actual payloads).
        empty_key=[0, 0])
    summary.scalar('approximate_duality_gap', self.approximate_duality_gap())
    summary.scalar('examples_seen', self._hashtable.size())
def _symmetric_l1_regularization(self):
return self._options['symmetric_l1_regularization']
def _symmetric_l2_regularization(self):
# Algorithmic requirement (for now) is to have minimal l2 of 1.0.
return max(self._options['symmetric_l2_regularization'], 1.0)
def _num_loss_partitions(self):
# Number of partitions of the global objective.
# TODO(andreasst): set num_loss_partitions automatically based on the number
# of workers
return self._options.get('num_loss_partitions', 1)
def _num_table_shards(self):
# Number of hash table shards.
# Return 1 if not specified or if the value is 'None'
# TODO(andreasst): set num_table_shards automatically based on the number
# of parameter servers
num_shards = self._options.get('num_table_shards')
return 1 if num_shards is None else num_shards
# TODO(sibyl-Aix6ihai): Use optimizer interface to make use of slot creation logic.
  def _create_slots(self):
    """Create internal 'unshrinked' slot variables.

    One zero-initialized mirror variable is created per weight variable; it
    holds the updated weights before the L1 shrinkage step is applied.
    """
    # Make internal variables which have the updates before applying L1
    # regularization.
    self._slots = collections.defaultdict(list)
    for name in ['sparse_features_weights', 'dense_features_weights']:
      for var in self._variables[name]:
        # Place each slot on the same device as the variable it mirrors.
        with ops.device(var.device):
          # TODO(andreasst): remove SDCAOptimizer suffix once bug 30843109 is
          # fixed
          self._slots['unshrinked_' + name].append(
              var_ops.Variable(
                  array_ops.zeros_like(var.initialized_value(), dtypes.float32),
                  name=var.op.name + '_unshrinked/SDCAOptimizer'))
def _assertSpecified(self, items, check_in):
for x in items:
if check_in[x] is None:
raise ValueError(check_in[x] + ' must be specified.')
def _assertList(self, items, check_in):
for x in items:
if not isinstance(check_in[x], list):
raise ValueError(x + ' must be a list.')
def _l1_loss(self):
    """Computes the (un-normalized) l1 loss of the model.

    Returns:
      A float64 scalar tensor: l1 * sum(|weights|) over all weight variables.
    """
    with name_scope('sdca/l1_loss'):
        sums = []
        for name in ['sparse_features_weights', 'dense_features_weights']:
            for weights in self._convert_n_to_tensor(self._variables[name]):
                # Reduce on the device that holds the weights; cast to float64
                # for an accurate accumulation.
                with ops.device(weights.device):
                    sums.append(
                        math_ops.reduce_sum(
                            math_ops.abs(math_ops.cast(weights, dtypes.float64))))
        # Renamed from `sum` to avoid shadowing the Python builtin.
        total = math_ops.add_n(sums)
        # SDCA L1 regularization cost is: l1 * sum(|weights|)
        return self._options['symmetric_l1_regularization'] * total
def _l2_loss(self, l2):
    """Computes the (un-normalized) l2 loss of the model.

    Args:
      l2: L2 regularization strength to apply.

    Returns:
      A float64 scalar tensor: l2 * sum(weights^2) / 2 over all weight vars.
    """
    with name_scope('sdca/l2_loss'):
        sums = []
        for name in ['sparse_features_weights', 'dense_features_weights']:
            for weights in self._convert_n_to_tensor(self._variables[name]):
                # Reduce on the device that holds the weights; cast to float64
                # for an accurate accumulation.
                with ops.device(weights.device):
                    sums.append(
                        math_ops.reduce_sum(
                            math_ops.square(math_ops.cast(weights, dtypes.float64))))
        # Renamed from `sum` to avoid shadowing the Python builtin.
        total = math_ops.add_n(sums)
        # SDCA L2 regularization cost is: l2 * sum(weights^2) / 2
        return l2 * total / 2.0
def _convert_n_to_tensor(self, input_list, as_ref=False):
    """Converts every entry of input_list to a tensor; returns the new list."""
    tensors = []
    for item in input_list:
        tensors.append(internal_convert_to_tensor(item, as_ref=as_ref))
    return tensors
def _linear_predictions(self, examples):
    """Returns predictions of the form w*x.

    Args:
      examples: dict with 'sparse_features' (parallel to the sparse weight
        variables) and 'dense_features' entries.
    """
    with name_scope('sdca/prediction'):
        sparse_variables = self._convert_n_to_tensor(self._variables[
            'sparse_features_weights'])
        result = 0.0
        # Sparse contribution: gather the weight for each (example, feature)
        # pair and segment-sum per example.
        for sfc, sv in zip(examples['sparse_features'], sparse_variables):
            # TODO(sibyl-Aix6ihai): following does not take care of missing features.
            result += math_ops.segment_sum(
                math_ops.multiply(
                    array_ops.gather(sv, sfc.feature_indices), sfc.feature_values),
                sfc.example_indices)
        dense_features = self._convert_n_to_tensor(examples['dense_features'])
        dense_variables = self._convert_n_to_tensor(self._variables[
            'dense_features_weights'])
        # Dense contribution: one matmul per dense feature column group.
        for i in range(len(dense_variables)):
            result += math_ops.matmul(dense_features[i],
                                      array_ops.expand_dims(dense_variables[i], -1))
        # Reshaping to allow shape inference at graph construction time.
        return array_ops.reshape(result, [-1])
def predictions(self, examples):
    """Add operations to compute predictions by the model.

    If logistic_loss is being used, predicted probabilities are returned.
    Otherwise, (raw) linear predictions (w*x) are returned.

    Args:
      examples: Examples to compute predictions on.

    Returns:
      An Operation that computes the predictions for examples.

    Raises:
      ValueError: if examples are not well defined.
    """
    required_keys = ['example_weights', 'sparse_features', 'dense_features']
    self._assertSpecified(required_keys, examples)
    self._assertList(['sparse_features', 'dense_features'], examples)

    linear = self._linear_predictions(examples)
    if self._options['loss_type'] != 'logistic_loss':
        return linear
    # Convert logits to probability for logistic loss predictions.
    with name_scope('sdca/logistic_prediction'):
        return math_ops.sigmoid(linear)
def minimize(self, global_step=None, name=None):
    """Add operations to train a linear model by minimizing the loss function.

    Args:
      global_step: Optional `Variable` to increment by one after the
        variables have been updated.
      name: Optional name for the returned operation.

    Returns:
      An Operation that updates the variables passed in the constructor.
    """
    # Technically, the op depends on a lot more than the variables,
    # but we'll keep the list short.
    with name_scope(name, 'sdca/minimize'):
      sparse_example_indices = []
      sparse_feature_indices = []
      sparse_features_values = []
      for sf in self._examples['sparse_features']:
        sparse_example_indices.append(sf.example_indices)
        sparse_feature_indices.append(sf.feature_indices)
        # If feature values are missing, sdca assumes a value of 1.0f.
        if sf.feature_values is not None:
          sparse_features_values.append(sf.feature_values)
      # pylint: disable=protected-access
      example_ids_hashed = gen_sdca_ops.sdca_fprint(
          internal_convert_to_tensor(self._examples['example_ids']))
      # pylint: enable=protected-access
      # Per-example dual/primal state is keyed by the hashed example ids.
      example_state_data = self._hashtable.lookup(example_ids_hashed)
      # Solver returns example_state_update, new delta sparse_feature_weights
      # and delta dense_feature_weights.
      weights_tensor = self._convert_n_to_tensor(self._slots[
          'unshrinked_sparse_features_weights'])
      sparse_weights = []
      sparse_indices = []
      for w, i in zip(weights_tensor, sparse_feature_indices):
        # Find the feature ids to lookup in the variables.
        with ops.device(w.device):
          # Deduplicate feature ids so each weight is gathered once; cast via
          # int32 for unique, back to int64 for the gather/scatter below.
          sparse_indices.append(
              math_ops.cast(
                  array_ops.unique(math_ops.cast(i, dtypes.int32))[0],
                  dtypes.int64))
          sparse_weights.append(array_ops.gather(w, sparse_indices[-1]))
      # pylint: disable=protected-access
      esu, sfw, dfw = gen_sdca_ops.sdca_optimizer(
          sparse_example_indices,
          sparse_feature_indices,
          sparse_features_values,
          self._convert_n_to_tensor(self._examples['dense_features']),
          internal_convert_to_tensor(self._examples['example_weights']),
          internal_convert_to_tensor(self._examples['example_labels']),
          sparse_indices,
          sparse_weights,
          self._convert_n_to_tensor(self._slots[
              'unshrinked_dense_features_weights']),
          example_state_data,
          loss_type=self._options['loss_type'],
          l1=self._options['symmetric_l1_regularization'],
          l2=self._symmetric_l2_regularization(),
          num_loss_partitions=self._num_loss_partitions(),
          num_inner_iterations=1)
      # pylint: enable=protected-access
      # New example state must be stored before the weight deltas are applied.
      with ops.control_dependencies([esu]):
        update_ops = [self._hashtable.insert(example_ids_hashed, esu)]
        # Update the weights before the proximal step.
        for w, i, u in zip(self._slots['unshrinked_sparse_features_weights'],
                           sparse_indices, sfw):
          update_ops.append(state_ops.scatter_add(w, i, u))
        for w, u in zip(self._slots['unshrinked_dense_features_weights'], dfw):
          update_ops.append(w.assign_add(u))
      if not global_step:
        return control_flow_ops.group(*update_ops)
      # Only bump global_step once all variable updates have run.
      with ops.control_dependencies(update_ops):
        return state_ops.assign_add(global_step, 1, name=name).op
def update_weights(self, train_op):
    """Updates the model weights.

    This function must be called on at least one worker after `minimize`.
    In distributed training this call can be omitted on non-chief workers to
    speed up training.

    Args:
      train_op: The operation returned by the `minimize` call.

    Returns:
      An Operation that updates the model weights.
    """
    with ops.control_dependencies([train_op]):
      update_ops = []
      # Copy over unshrinked weights to user provided variables.
      for name in ['sparse_features_weights', 'dense_features_weights']:
        for var, slot_var in zip(self._variables[name],
                                 self._slots['unshrinked_' + name]):
          update_ops.append(var.assign(slot_var))
    # Apply proximal step.  The copies above must finish first, hence the
    # dependency on update_ops; the list is then reused for the shrink ops.
    with ops.control_dependencies(update_ops):
      update_ops = []
      for name in ['sparse_features_weights', 'dense_features_weights']:
        for var in self._variables[name]:
          with ops.device(var.device):
            # pylint: disable=protected-access
            # sdca_shrink_l1 applies the L1 proximal operator in place
            # (variables passed as refs).
            update_ops.append(
                gen_sdca_ops.sdca_shrink_l1(
                    self._convert_n_to_tensor(
                        [var], as_ref=True),
                    l1=self._symmetric_l1_regularization(),
                    l2=self._symmetric_l2_regularization()))
      return control_flow_ops.group(*update_ops)
def approximate_duality_gap(self):
    """Add operations to compute the approximate duality gap.

    Returns:
      An Operation that computes the approximate duality gap over all
      examples.
    """
    with name_scope('sdca/approximate_duality_gap'):
      _, values_list = self._hashtable.export_sharded()
      shard_sums = []
      for values in values_list:
        with ops.device(values.device):
          # For large tables to_double() below allocates a large temporary
          # tensor that is freed once the sum operation completes. To reduce
          # peak memory usage in cases where we have multiple large tables on a
          # single device, we serialize these operations.
          # Note that we need double precision to get accurate results.
          with ops.control_dependencies(shard_sums):
            shard_sums.append(
                math_ops.reduce_sum(math_ops.to_double(values), 0))
      summed_values = math_ops.add_n(shard_sums)
      # Columns of the summed example state: [1]=primal loss, [2]=dual loss,
      # [3]=example weights (column 0 is unused here).
      primal_loss = summed_values[1]
      dual_loss = summed_values[2]
      example_weights = summed_values[3]
      # Note: we return NaN if there are no weights or all weights are 0, e.g.
      # if no examples have been processed
      return (primal_loss + dual_loss + self._l1_loss() +
              (2.0 * self._l2_loss(self._symmetric_l2_regularization()))
             ) / example_weights
def unregularized_loss(self, examples):
    """Add operations to compute the loss (without the regularization loss).

    Args:
      examples: Examples to compute unregularized loss on.

    Returns:
      An Operation that computes mean (unregularized) loss for given set of
      examples.

    Raises:
      ValueError: if examples are not well defined.
    """
    self._assertSpecified([
        'example_labels', 'example_weights', 'sparse_features', 'dense_features'
    ], examples)
    self._assertList(['sparse_features', 'dense_features'], examples)
    with name_scope('sdca/unregularized_loss'):
      # All losses are accumulated in float64 for numerical accuracy.
      predictions = math_ops.cast(
          self._linear_predictions(examples), dtypes.float64)
      labels = math_ops.cast(
          internal_convert_to_tensor(examples['example_labels']),
          dtypes.float64)
      weights = math_ops.cast(
          internal_convert_to_tensor(examples['example_weights']),
          dtypes.float64)
      if self._options['loss_type'] == 'logistic_loss':
        # Weighted mean of the per-example sigmoid cross-entropy.
        return math_ops.reduce_sum(math_ops.multiply(
            sigmoid_cross_entropy_with_logits(labels=labels,
                                              logits=predictions),
            weights)) / math_ops.reduce_sum(weights)
      if self._options['loss_type'] in ['hinge_loss', 'smooth_hinge_loss']:
        # hinge_loss = max{0, 1 - y_i w*x} where y_i \in {-1, 1}. So, we need to
        # first convert 0/1 labels into -1/1 labels.
        all_ones = array_ops.ones_like(predictions)
        adjusted_labels = math_ops.subtract(2 * labels, all_ones)
        # Tensor that contains (unweighted) error (hinge loss) per
        # example.
        error = nn_ops.relu(
            math_ops.subtract(all_ones,
                              math_ops.multiply(adjusted_labels, predictions)))
        weighted_error = math_ops.multiply(error, weights)
        return math_ops.reduce_sum(weighted_error) / math_ops.reduce_sum(
            weights)
      # squared loss
      err = math_ops.subtract(labels, predictions)
      weighted_squared_err = math_ops.multiply(math_ops.square(err), weights)
      # SDCA squared loss function is sum(err^2) / (2*sum(weights))
      return (math_ops.reduce_sum(weighted_squared_err) /
              (2.0 * math_ops.reduce_sum(weights)))
def regularized_loss(self, examples):
    """Add operations to compute the loss with regularization loss included.

    Args:
      examples: Examples to compute loss on.

    Returns:
      An Operation that computes mean (regularized) loss for given set of
      examples.

    Raises:
      ValueError: if examples are not well defined.
    """
    self._assertSpecified([
        'example_labels', 'example_weights', 'sparse_features', 'dense_features'
    ], examples)
    self._assertList(['sparse_features', 'dense_features'], examples)
    with name_scope('sdca/regularized_loss'):
      weights = internal_convert_to_tensor(examples['example_weights'])
      # Regularization terms are normalized by the total example weight, then
      # added to the (already-mean) unregularized loss.
      return ((
          self._l1_loss() +
          # Note that here we are using the raw regularization
          # (as specified by the user) and *not*
          # self._symmetric_l2_regularization().
          self._l2_loss(self._options['symmetric_l2_regularization'])) /
              math_ops.reduce_sum(math_ops.cast(weights, dtypes.float64)) +
              self.unregularized_loss(examples))
|
AirtrafficAnalysisGroup/graph_tools_networkx | refs/heads/master | readdbexample.py | 1 | import sqlite3
import sys
import scipy
import numpy as np
import scipy.stats
# Directory that holds the SQLite database file(s).
database_path='database/'
# An optional single CLI argument selects an alternate database file name.
if len(sys.argv) == 2:
    database_path += sys.argv[1]
else:
    database_path += 'bfdatabase.db'
# recovery from great recession?
# so for like all the markets (i.e. all market ids)
# get the sum(in_ and out_degree people), sum(in_ and out_ fares)
# if it drops more than a certain percent -- say there is decline there
conn = sqlite3.connect(database_path)
c = conn.cursor()
#for row in c.execute('SELECT year FROM econ_data'):
#    print(row)
#print(list(map(lambda x: x[0], c.description)))
# Resolve the market id for Los Angeles from the lookup table.
c.execute('SELECT fl_id FROM lookup WHERE fl_name=?', ('Los Angeles',))
air_id = (c.fetchone())[0]
# for all markets
# Emit one CSV line per (year, quarter): total passenger traffic (in+out)
# and an economic indicator for the Los Angeles market.
for year in range(1994,2016):
    for q in range(1,5):
        total_out = 0
        total_in = 0
        rows_fl = c.execute('SELECT in_degree_people, out_degree_people FROM airports WHERE year=? AND market_id=? AND quarter=?', (year,air_id, q))
        for pair in rows_fl:
            total_in += pair[0]
            total_out += pair[1]
        rows_ec = c.execute('SELECT mean_value FROM econ_data WHERE year=? AND MSA_id_airline=? AND quarter=?', (year, int(air_id), q))
        total_ec = 0
        for value in rows_ec:
            total_ec += value[0]
        # NOTE(review): total_ec/4 looks like an average over four rows per
        # quarter -- confirm against the econ_data schema.
        out = str(year)+"Q"+str(q)+","+str(total_in+total_out)+","+str(total_ec/4)
        print(out)
conn.close()
|
estaban/pyload | refs/heads/stable | module/plugins/hoster/EasybytezCom.py | 1 | # -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
"""
from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
class EasybytezCom(XFileSharingPro):
    # pyload plugin metadata.
    __name__ = "EasybytezCom"
    __type__ = "hoster"
    # URL pattern this hoster plugin claims; group(1) is the file id.
    __pattern__ = r'http://(?:www\.)?easybytez.com/(\w+).*'
    __version__ = "0.17"
    __description__ = """Easybytez.com hoster plugin"""
    __author_name__ = ("zoidberg", "stickell")
    __author_mail__ = ("zoidberg@mujmail.cz", "l.stickell@yahoo.it")

    HOSTER_NAME = "easybytez.com"

    # Regexes used by the XFileSharingPro base class to scrape the page:
    # file name/size, offline marker, final download link, premium override
    # link, and error messages.
    FILE_INFO_PATTERN = r'<span class="name">(?P<N>.+)</span><br>\s*<span class="size">(?P<S>[^<]+)</span>'
    OFFLINE_PATTERN = r'<h1>File not available</h1>'

    LINK_PATTERN = r'(http://(\w+\.(easyload|easybytez|zingload)\.(com|to)|\d+\.\d+\.\d+\.\d+)/files/\d+/\w+/[^"<]+)'
    OVR_LINK_PATTERN = r'<h2>Download Link</h2>\s*<textarea[^>]*>([^<]+)'
    ERROR_PATTERN = r'(?:class=["\']err["\'][^>]*>|<Center><b>)(.*?)</'

    def setup(self):
        """Enable resume and parallel downloads only for premium accounts."""
        self.resumeDownload = self.multiDL = self.premium
getInfo = create_getInfo(EasybytezCom)
|
h-hirokawa/ansible | refs/heads/devel | lib/ansible/constants.py | 2 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import tempfile
from string import ascii_letters, digits
from ansible.compat.six import string_types
from ansible.compat.six.moves import configparser
from ansible.parsing.quoting import unquote
from ansible.errors import AnsibleOptionsError
# copied from utils, avoid circular reference fun :)
# copied from utils, avoid circular reference fun :)
def mk_boolean(value):
    """Coerce a config value to bool.

    None is False; otherwise the string form (case-insensitive) must be one
    of the recognized truthy tokens.
    """
    if value is None:
        return False
    # Idiom fix: return the membership test directly instead of an
    # if/else returning True/False.
    return str(value).lower() in ("true", "t", "y", "1", "yes", "on")
def shell_expand(path, expand_relative_paths=False):
    '''
    shell_expand is needed as os.path.expanduser does not work
    when path is None, which is the default for ANSIBLE_PRIVATE_KEY_FILE
    '''
    # Guard clause: None (and other falsy values) pass through untouched.
    if not path:
        return path

    expanded = os.path.expanduser(os.path.expandvars(path))
    if expand_relative_paths and not expanded.startswith('/'):
        # paths are always 'relative' to the config?
        if 'CONFIG_FILE' in globals():
            expanded = os.path.join(os.path.dirname(CONFIG_FILE), expanded)
        expanded = os.path.abspath(expanded)
    return expanded
def get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False, isnone=False, ispath=False, ispathlist=False, istmppath=False, expand_relative_paths=False):
    ''' return a configuration variable with casting '''
    value = _get_config(p, section, key, env_var, default)
    # Boolean normalization happens first; a False result also skips the
    # cast chain below because of the truthiness gate.
    if boolean:
        value = mk_boolean(value)
    # The casts are mutually exclusive and only applied to truthy values,
    # so an empty/None setting falls through unchanged.
    if value:
        if integer:
            value = int(value)
        elif floating:
            value = float(value)
        elif islist:
            if isinstance(value, string_types):
                value = [x.strip() for x in value.split(',')]
        elif isnone:
            if value == "None":
                value = None
        elif ispath:
            value = shell_expand(value)
        elif istmppath:
            value = shell_expand(value)
            # Ensure the base directory exists (0700), then hand back a fresh
            # per-process temp subdirectory inside it.
            if not os.path.exists(value):
                os.makedirs(value, 0o700)
            prefix = 'ansible-local-%s' % os.getpid()
            value = tempfile.mkdtemp(prefix=prefix, dir=value)
        elif ispathlist:
            if isinstance(value, string_types):
                value = [shell_expand(x, expand_relative_paths=expand_relative_paths) \
                        for x in value.split(os.pathsep)]
        elif isinstance(value, string_types):
            value = unquote(value)
    return value
def _get_config(p, section, key, env_var, default):
''' helper function for get_config '''
if env_var is not None:
value = os.environ.get(env_var, None)
if value is not None:
return value
if p is not None:
try:
return p.get(section, key, raw=True)
except:
return default
return default
def load_config_file():
    ''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/ansible '''
    parser = configparser.ConfigParser()

    # Build the candidate list in priority order; the ENV entry may be None.
    env_path = os.getenv("ANSIBLE_CONFIG", None)
    if env_path is not None:
        env_path = os.path.expanduser(env_path)
        if os.path.isdir(env_path):
            env_path += "/ansible.cfg"

    candidates = [
        env_path,
        os.getcwd() + "/ansible.cfg",
        os.path.expanduser("~/.ansible.cfg"),
        "/etc/ansible/ansible.cfg",
    ]

    for candidate in candidates:
        if candidate is not None and os.path.exists(candidate):
            try:
                parser.read(candidate)
            except configparser.Error as e:
                raise AnsibleOptionsError("Error reading config file: \n{0}".format(e))
            return parser, candidate
    return None, ''
# Parse the config file once at import time; CONFIG_FILE is the path that
# was actually used (empty string when none was found).
p, CONFIG_FILE = load_config_file()

# check all of these extensions when looking for yaml files for things like
# group variables -- really anything we can load
YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ]

# the default whitelist for cow stencils
DEFAULT_COW_WHITELIST = ['bud-frogs', 'bunny', 'cheese', 'daemon', 'default', 'dragon', 'elephant-in-snake', 'elephant',
                         'eyes', 'hellokitty', 'kitty', 'luke-koala', 'meow', 'milk', 'moofasa', 'moose', 'ren', 'sheep',
                         'small', 'stegosaurus', 'stimpy', 'supermilker', 'three-eyes', 'turkey', 'turtle', 'tux', 'udder',
                         'vader-koala', 'vader', 'www',]

# sections in config file
DEFAULTS='defaults'

# FIXME: add deprecation warning when these get set
#### DEPRECATED VARS ####
# use more sanely named 'inventory'
DEPRECATED_HOST_LIST = get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', '/etc/ansible/hosts', ispath=True)
# this is not used since 0.5 but people might still have in config
DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, None)

#### GENERALLY CONFIGURABLE THINGS ####
DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True)
DEFAULT_HOST_LIST = get_config(p, DEFAULTS,'inventory', 'ANSIBLE_INVENTORY', DEPRECATED_HOST_LIST, ispath=True)
DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None, ispathlist=True)
DEFAULT_ROLES_PATH = get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles', ispathlist=True, expand_relative_paths=True)
DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp')
DEFAULT_LOCAL_TMP = get_config(p, DEFAULTS, 'local_tmp', 'ANSIBLE_LOCAL_TEMP', '$HOME/.ansible/tmp', istmppath=True)
DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command')
DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, integer=True)
DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '')
DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', os.getenv('LANG', 'en_US.UTF-8'))
DEFAULT_MODULE_SET_LOCALE = get_config(p, DEFAULTS, 'module_set_locale','ANSIBLE_MODULE_SET_LOCALE',False, boolean=True)
DEFAULT_MODULE_COMPRESSION= get_config(p, DEFAULTS, 'module_compression', None, 'ZIP_DEFLATED')
DEFAULT_TIMEOUT = get_config(p, DEFAULTS, 'timeout', 'ANSIBLE_TIMEOUT', 10, integer=True)
DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE_POLL_INTERVAL', 15, integer=True)
DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', None)
DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True)
DEFAULT_PRIVATE_KEY_FILE = get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None, ispath=True)
DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True)
DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True)
DEFAULT_VAULT_PASSWORD_FILE = get_config(p, DEFAULTS, 'vault_password_file', 'ANSIBLE_VAULT_PASSWORD_FILE', None, ispath=True)
DEFAULT_TRANSPORT = get_config(p, DEFAULTS, 'transport', 'ANSIBLE_TRANSPORT', 'smart')
DEFAULT_SCP_IF_SSH = get_config(p, 'ssh_connection', 'scp_if_ssh', 'ANSIBLE_SCP_IF_SSH', False, boolean=True)
DEFAULT_SFTP_BATCH_MODE = get_config(p, 'ssh_connection', 'sftp_batch_mode', 'ANSIBLE_SFTP_BATCH_MODE', True, boolean=True)
DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None, 'Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}')
DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER')
DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True)
DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace')
DEFAULT_PRIVATE_ROLE_VARS = get_config(p, DEFAULTS, 'private_role_vars', 'ANSIBLE_PRIVATE_ROLE_VARS', False, boolean=True)
DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBLE_JINJA2_EXTENSIONS', None)
DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh')
DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower()
DEFAULT_GATHER_SUBSET = get_config(p, DEFAULTS, 'gather_subset', 'ANSIBLE_GATHER_SUBSET', 'all').lower()
DEFAULT_GATHER_TIMEOUT = get_config(p, DEFAULTS, 'gather_timeout', 'ANSIBLE_GATHER_TIMEOUT', 10, integer=True)
DEFAULT_LOG_PATH = get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '', ispath=True)
DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True)
DEFAULT_INVENTORY_IGNORE = get_config(p, DEFAULTS, 'inventory_ignore_extensions', 'ANSIBLE_INVENTORY_IGNORE', ["~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo"], islist=True)
DEFAULT_VAR_COMPRESSION_LEVEL = get_config(p, DEFAULTS, 'var_compression_level', 'ANSIBLE_VAR_COMPRESSION_LEVEL', 0, integer=True)

# static includes
DEFAULT_TASK_INCLUDES_STATIC = get_config(p, DEFAULTS, 'task_includes_static', 'ANSIBLE_TASK_INCLUDES_STATIC', False, boolean=True)
DEFAULT_HANDLER_INCLUDES_STATIC = get_config(p, DEFAULTS, 'handler_includes_static', 'ANSIBLE_HANDLER_INCLUDES_STATIC', False, boolean=True)

# disclosure
DEFAULT_NO_LOG = get_config(p, DEFAULTS, 'no_log', 'ANSIBLE_NO_LOG', False, boolean=True)
DEFAULT_NO_TARGET_SYSLOG = get_config(p, DEFAULTS, 'no_target_syslog', 'ANSIBLE_NO_TARGET_SYSLOG', False, boolean=True)
ALLOW_WORLD_READABLE_TMPFILES = get_config(p, DEFAULTS, 'allow_world_readable_tmpfiles', None, False, boolean=True)

# selinux
DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf, ramfs', islist=True)
DEFAULT_LIBVIRT_LXC_NOSECLABEL = get_config(p, 'selinux', 'libvirt_lxc_noseclabel', 'LIBVIRT_LXC_NOSECLABEL', False, boolean=True)

### PRIVILEGE ESCALATION ###
# Backwards Compat
DEFAULT_SU = get_config(p, DEFAULTS, 'su', 'ANSIBLE_SU', False, boolean=True)
DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root')
DEFAULT_SU_EXE = get_config(p, DEFAULTS, 'su_exe', 'ANSIBLE_SU_EXE', None)
DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', None)
DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True)
DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True)
DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', None)
DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H -S -n')
DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True)

# Become
BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': '', 'doas': 'Permission denied', 'dzdo': ''} #FIXME: deal with i18n
BECOME_MISSING_STRINGS = {'sudo': 'sorry, a password is required to run sudo', 'su': '', 'pbrun': '', 'pfexec': '', 'runas': '', 'doas': 'Authorization required', 'dzdo': ''} #FIXME: deal with i18n
BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas','doas','dzdo']
BECOME_ALLOW_SAME_USER = get_config(p, 'privilege_escalation', 'become_allow_same_user', 'ANSIBLE_BECOME_ALLOW_SAME_USER', False, boolean=True)
# Default become method falls back to whichever legacy flag (sudo/su) is set.
DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower()
DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True)
DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root')
DEFAULT_BECOME_EXE = get_config(p, 'privilege_escalation', 'become_exe', 'ANSIBLE_BECOME_EXE', None)
DEFAULT_BECOME_FLAGS = get_config(p, 'privilege_escalation', 'become_flags', 'ANSIBLE_BECOME_FLAGS', None)
DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True)

# PLUGINS

# Modules that can optimize with_items loops into a single call. Currently
# these modules must (1) take a "name" or "pkg" parameter that is a list. If
# the module takes both, bad things could happen.
# In the future we should probably generalize this even further
# (mapping of param: squash field)
DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS', "apk, apt, dnf, package, pacman, pkgng, yum, zypper", islist=True)
# paths
DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action:/usr/share/ansible/plugins/action', ispathlist=True)
DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache:/usr/share/ansible/plugins/cache', ispathlist=True)
DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '~/.ansible/plugins/callback:/usr/share/ansible/plugins/callback', ispathlist=True)
DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '~/.ansible/plugins/connection:/usr/share/ansible/plugins/connection', ispathlist=True)
DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup:/usr/share/ansible/plugins/lookup', ispathlist=True)
DEFAULT_INVENTORY_PLUGIN_PATH = get_config(p, DEFAULTS, 'inventory_plugins', 'ANSIBLE_INVENTORY_PLUGINS', '~/.ansible/plugins/inventory:/usr/share/ansible/plugins/inventory', ispathlist=True)
DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars:/usr/share/ansible/plugins/vars', ispathlist=True)
DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter:/usr/share/ansible/plugins/filter', ispathlist=True)
DEFAULT_TEST_PLUGIN_PATH = get_config(p, DEFAULTS, 'test_plugins', 'ANSIBLE_TEST_PLUGINS', '~/.ansible/plugins/test:/usr/share/ansible/plugins/test', ispathlist=True)
DEFAULT_STRATEGY_PLUGIN_PATH = get_config(p, DEFAULTS, 'strategy_plugins', 'ANSIBLE_STRATEGY_PLUGINS', '~/.ansible/plugins/strategy:/usr/share/ansible/plugins/strategy', ispathlist=True)
DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default')

# cache
CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory')
CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None)
CACHE_PLUGIN_PREFIX = get_config(p, DEFAULTS, 'fact_caching_prefix', 'ANSIBLE_CACHE_PLUGIN_PREFIX', 'ansible_facts')
CACHE_PLUGIN_TIMEOUT = get_config(p, DEFAULTS, 'fact_caching_timeout', 'ANSIBLE_CACHE_PLUGIN_TIMEOUT', 24 * 60 * 60, integer=True)

# Display
ANSIBLE_FORCE_COLOR = get_config(p, DEFAULTS, 'force_color', 'ANSIBLE_FORCE_COLOR', None, boolean=True)
ANSIBLE_NOCOLOR = get_config(p, DEFAULTS, 'nocolor', 'ANSIBLE_NOCOLOR', None, boolean=True)
ANSIBLE_NOCOWS = get_config(p, DEFAULTS, 'nocows', 'ANSIBLE_NOCOWS', None, boolean=True)
ANSIBLE_COW_SELECTION = get_config(p, DEFAULTS, 'cow_selection', 'ANSIBLE_COW_SELECTION', 'default')
ANSIBLE_COW_WHITELIST = get_config(p, DEFAULTS, 'cow_whitelist', 'ANSIBLE_COW_WHITELIST', DEFAULT_COW_WHITELIST, islist=True)
DISPLAY_SKIPPED_HOSTS = get_config(p, DEFAULTS, 'display_skipped_hosts', 'DISPLAY_SKIPPED_HOSTS', True, boolean=True)
DEFAULT_UNDEFINED_VAR_BEHAVIOR = get_config(p, DEFAULTS, 'error_on_undefined_vars', 'ANSIBLE_ERROR_ON_UNDEFINED_VARS', True, boolean=True)
HOST_KEY_CHECKING = get_config(p, DEFAULTS, 'host_key_checking', 'ANSIBLE_HOST_KEY_CHECKING', True, boolean=True)
SYSTEM_WARNINGS = get_config(p, DEFAULTS, 'system_warnings', 'ANSIBLE_SYSTEM_WARNINGS', True, boolean=True)
DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings', 'ANSIBLE_DEPRECATION_WARNINGS', True, boolean=True)
DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True)
COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', True, boolean=True)
DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True)
DEFAULT_CALLBACK_WHITELIST = get_config(p, DEFAULTS, 'callback_whitelist', 'ANSIBLE_CALLBACK_WHITELIST', [], islist=True)
RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True)
RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', None, ispath=True)
DEFAULT_NULL_REPRESENTATION = get_config(p, DEFAULTS, 'null_representation', 'ANSIBLE_NULL_REPRESENTATION', None, isnone=True)
DISPLAY_ARGS_TO_STDOUT = get_config(p, DEFAULTS, 'display_args_to_stdout', 'ANSIBLE_DISPLAY_ARGS_TO_STDOUT', False, boolean=True)
MAX_FILE_SIZE_FOR_DIFF = get_config(p, DEFAULTS, 'max_diff_size', 'ANSIBLE_MAX_DIFF_SIZE', 1024*1024, integer=True)

# CONNECTION RELATED
ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', '-C -o ControlMaster=auto -o ControlPersist=60s')
ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r")
ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, boolean=True)
ANSIBLE_SSH_RETRIES = get_config(p, 'ssh_connection', 'retries', 'ANSIBLE_SSH_RETRIES', 0, integer=True)
PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, boolean=True)
PARAMIKO_PROXY_COMMAND = get_config(p, 'paramiko_connection', 'proxy_command', 'ANSIBLE_PARAMIKO_PROXY_COMMAND', None)

# obsolete -- will be formally removed
ZEROMQ_PORT = get_config(p, 'fireball_connection', 'zeromq_port', 'ANSIBLE_ZEROMQ_PORT', 5099, integer=True)
ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, integer=True)
ACCELERATE_TIMEOUT = get_config(p, 'accelerate', 'accelerate_timeout', 'ACCELERATE_TIMEOUT', 30, integer=True)
ACCELERATE_CONNECT_TIMEOUT = get_config(p, 'accelerate', 'accelerate_connect_timeout', 'ACCELERATE_CONNECT_TIMEOUT', 1.0, floating=True)
ACCELERATE_DAEMON_TIMEOUT = get_config(p, 'accelerate', 'accelerate_daemon_timeout', 'ACCELERATE_DAEMON_TIMEOUT', 30, integer=True)
ACCELERATE_KEYS_DIR = get_config(p, 'accelerate', 'accelerate_keys_dir', 'ACCELERATE_KEYS_DIR', '~/.fireball.keys')
ACCELERATE_KEYS_DIR_PERMS = get_config(p, 'accelerate', 'accelerate_keys_dir_perms', 'ACCELERATE_KEYS_DIR_PERMS', '700')
ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_file_perms', 'ACCELERATE_KEYS_FILE_PERMS', '600')
ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, boolean=True)
PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True)

# galaxy related
GALAXY_SERVER = get_config(p, 'galaxy', 'server', 'ANSIBLE_GALAXY_SERVER', 'https://galaxy.ansible.com')
GALAXY_IGNORE_CERTS = get_config(p, 'galaxy', 'ignore_certs', 'ANSIBLE_GALAXY_IGNORE', False, boolean=True)
# this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated
GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', 'git, hg', islist=True)

# characters included in auto-generated passwords
DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_"

STRING_TYPE_FILTERS = get_config(p, 'jinja2', 'dont_type_filters', 'ANSIBLE_STRING_TYPE_FILTERS', ['string', 'to_json', 'to_nice_json', 'to_yaml', 'ppretty', 'json'], islist=True )

# colors
COLOR_HIGHLIGHT = get_config(p, 'colors', 'highlight', 'ANSIBLE_COLOR_HIGHLIGHT', 'white')
COLOR_VERBOSE = get_config(p, 'colors', 'verbose', 'ANSIBLE_COLOR_VERBOSE', 'blue')
COLOR_WARN = get_config(p, 'colors', 'warn', 'ANSIBLE_COLOR_WARN', 'bright purple')
COLOR_ERROR = get_config(p, 'colors', 'error', 'ANSIBLE_COLOR_ERROR', 'red')
COLOR_DEBUG = get_config(p, 'colors', 'debug', 'ANSIBLE_COLOR_DEBUG', 'dark gray')
COLOR_DEPRECATE = get_config(p, 'colors', 'deprecate', 'ANSIBLE_COLOR_DEPRECATE', 'purple')
COLOR_SKIP = get_config(p, 'colors', 'skip', 'ANSIBLE_COLOR_SKIP', 'cyan')
COLOR_UNREACHABLE = get_config(p, 'colors', 'unreachable', 'ANSIBLE_COLOR_UNREACHABLE', 'bright red')
COLOR_OK = get_config(p, 'colors', 'ok', 'ANSIBLE_COLOR_OK', 'green')
COLOR_CHANGED = get_config(p, 'colors', 'changed', 'ANSIBLE_COLOR_CHANGED', 'yellow')
COLOR_DIFF_ADD = get_config(p, 'colors', 'diff_add', 'ANSIBLE_COLOR_DIFF_ADD', 'green')
COLOR_DIFF_REMOVE = get_config(p, 'colors', 'diff_remove', 'ANSIBLE_COLOR_DIFF_REMOVE', 'red')
COLOR_DIFF_LINES = get_config(p, 'colors', 'diff_lines', 'ANSIBLE_COLOR_DIFF_LINES', 'cyan')

# diff
DIFF_CONTEXT = get_config(p, 'diff', 'context', 'ANSIBLE_DIFF_CONTEXT', 3, integer=True)

# non-configurable things
MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script']
MODULE_NO_JSON = ['command', 'shell', 'raw']
DEFAULT_BECOME_PASS = None
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
DEFAULT_SUBSET = None
DEFAULT_SU_PASS = None
VAULT_VERSION_MIN = 1.0
VAULT_VERSION_MAX = 1.0
TREE_DIR = None
LOCALHOST = frozenset(['127.0.0.1', 'localhost', '::1'])

# module search
BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm', '.md', '.txt')
IGNORE_FILES = [ "COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION", "GUIDELINES", "test-docs.sh"]
|
chromium/chromium | refs/heads/master | remoting/host/installer/linux/build_deb.py | 5 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A simple python wrapper so GN can run build-deb.sh."""
import os
import subprocess
import sys
def main():
    """Run build-deb.sh (located next to this wrapper), forwarding argv.

    Returns the child process's exit code; its stripped stdout is echoed
    to our own stdout so GN can capture it.
    """
    script = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), 'build-deb.sh')
    process = subprocess.Popen([script] + sys.argv[1:],
                               stdout=subprocess.PIPE)
    stdout_data, _ = process.communicate()
    sys.stdout.write(stdout_data.decode('utf8').strip())
    return process.returncode
if __name__ == '__main__':
sys.exit(main())
|
LeetCoinTeam/lc_api_examples | refs/heads/master | python_appengine/jinja2/__init__.py | 107 | # -*- coding: utf-8 -*-
"""
jinja2
~~~~~~
Jinja2 is a template engine written in pure Python. It provides a
Django inspired non-XML syntax but supports inline expressions and
an optional sandboxed environment.
Nutshell
--------
Here a small example of a Jinja2 template::
{% extends 'base.html' %}
{% block title %}Memberlist{% endblock %}
{% block content %}
<ul>
{% for user in users %}
<li><a href="{{ user.url }}">{{ user.username }}</a></li>
{% endfor %}
</ul>
{% endblock %}
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
__docformat__ = 'restructuredtext en'
__version__ = '2.6'
# high level interface
from jinja2.environment import Environment, Template
# loaders
from jinja2.loaders import BaseLoader, FileSystemLoader, PackageLoader, \
DictLoader, FunctionLoader, PrefixLoader, ChoiceLoader, \
ModuleLoader
# bytecode caches
from jinja2.bccache import BytecodeCache, FileSystemBytecodeCache, \
MemcachedBytecodeCache
# undefined types
from jinja2.runtime import Undefined, DebugUndefined, StrictUndefined
# exceptions
from jinja2.exceptions import TemplateError, UndefinedError, \
TemplateNotFound, TemplatesNotFound, TemplateSyntaxError, \
TemplateAssertionError
# decorators and public utilities
from jinja2.filters import environmentfilter, contextfilter, \
evalcontextfilter
from jinja2.utils import Markup, escape, clear_caches, \
environmentfunction, evalcontextfunction, contextfunction, \
is_undefined
__all__ = [
'Environment', 'Template', 'BaseLoader', 'FileSystemLoader',
'PackageLoader', 'DictLoader', 'FunctionLoader', 'PrefixLoader',
'ChoiceLoader', 'BytecodeCache', 'FileSystemBytecodeCache',
'MemcachedBytecodeCache', 'Undefined', 'DebugUndefined',
'StrictUndefined', 'TemplateError', 'UndefinedError', 'TemplateNotFound',
'TemplatesNotFound', 'TemplateSyntaxError', 'TemplateAssertionError',
'ModuleLoader', 'environmentfilter', 'contextfilter', 'Markup', 'escape',
'environmentfunction', 'contextfunction', 'clear_caches', 'is_undefined',
'evalcontextfilter', 'evalcontextfunction'
]
|
codexns/sublime-coverage | refs/heads/master | st2_linux_x32/coverage/version.py | 10 | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""The version and URL for coverage.py"""
# This file is exec'ed in setup.py, don't import anything!
# Same semantics as sys.version_info.
version_info = (4, 0, 0, 'beta', 2)
def _make_version(major, minor, micro, releaselevel, serial):
"""Create a readable version string from version_info tuple components."""
assert releaselevel in ['alpha', 'beta', 'candidate', 'final']
version = "%d.%d" % (major, minor)
if micro:
version += ".%d" % (micro,)
if releaselevel != 'final':
version += "%s%d" % (releaselevel[0], serial)
return version
def _make_url(major, minor, micro, releaselevel, serial):
"""Make the URL people should start at for this version of coverage.py."""
url = "https://coverage.readthedocs.org"
if releaselevel != 'final':
# For pre-releases, use a version-specific URL.
url += "/en/coverage-" + _make_version(major, minor, micro, releaselevel, serial)
return url
# Derive the public version string and docs URL from version_info.
__version__ = _make_version(*version_info)
__url__ = _make_url(*version_info)
|
mmuellner/vigra | refs/heads/master | vigranumpy/test/test2.py | 13 | #######################################################################
#
# Copyright 2009-2010 by Ullrich Koethe
#
# This file is part of the VIGRA computer vision library.
# The VIGRA Website is
# http://hci.iwr.uni-heidelberg.de/vigra/
# Please direct questions, bug reports, and contributions to
# ullrich.koethe@iwr.uni-heidelberg.de or
# vigra@informatik.uni-hamburg.de
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
#######################################################################
import sys
print >> sys.stderr, "\nexecuting test file", __file__
execfile('set_paths.py')
from nose.tools import assert_equal, raises
import numpy as np
from vigra import *
from vigra.filters import *
from vigra.sampling import *
from vigra.noise import *
import vigra.arraytypes as at
# In the hope that the functions are tested in C++, we basically test
# return types here.  Module-level fixtures shared by the tests below:
# a random RGB float image, a scalar image, an 8-bit volume and a
# boolean volume.
#image=readImage("/export/home/nhuesken/sas/experiments/testdata/bmpmultilabel.bmp")
image=at.RGBImage(np.random.rand(100,100,3)*255,dtype=np.float32)
scalar_image=at.ScalarImage(np.random.rand(100,100)*255,dtype=np.float32)
volume256=at.Volume(np.random.rand(100,100,100)*255,dtype=np.uint8)
volumeBin=at.Volume(np.random.rand(100,100,100))>0.5
def checkImages(i1, i2):
    """Assert that both arrays have the same shape and agree on at
    least one element."""
    assert i1.shape == i2.shape
    matching = np.sum(i1 == i2)
    assert matching != 0
def checkAboutSame(i1, i2):
    """Assert equal shapes and a mean absolute difference below 5."""
    assert i1.shape == i2.shape
    mean_abs_diff = np.sum(np.abs(i1 - i2)) / float(np.size(i1))
    assert mean_abs_diff < 5
def test_simpleRotate():
    """A quarter turn swaps the image axes, and combinations of quarter
    turns that sum to a full rotation restore the original image."""
    i2=rotateImageSimple(image,orientation=RotationDirection.CLOCKWISE);
    #simple basic test
    assert(i2.shape[0] == image.shape[1])
    assert(i2.shape[1] == image.shape[0])
    #test, that they are compatible
    i3=rotateImageSimple(i2,orientation=RotationDirection.COUNTER_CLOCKWISE);
    checkImages(image,i3)
    # UPSIDE_DOWN followed by two counter-clockwise quarter turns is a
    # full 360-degree rotation.
    i2=rotateImageSimple(image,orientation=RotationDirection.UPSIDE_DOWN);
    i3=rotateImageSimple(i2,orientation=RotationDirection.COUNTER_CLOCKWISE);
    i2=rotateImageSimple(i3,orientation=RotationDirection.COUNTER_CLOCKWISE);
    checkImages(image,i2)
def test_rotate():
    """rotateImageDegree(degree=90) should agree with rotateImageSimple."""
    i2=rotateImageSimple(image)
    # NOTE(review): i3 aliases i2, so checkImages below compares the
    # buffer with itself and can never fail; `i3 = i2.copy()` (or a new
    # buffer of the same shape) was likely intended -- confirm upstream.
    i3=i2
    rotateImageDegree(image,degree=90,out=i3)
    checkImages(i2,i3)
def test_resample():
    """resampleImage with factor 0.5 halves the first dimension."""
    #just testing the size
    i2=resampleImage(image,factor=0.5)
    assert(i2.shape[0]==image.shape[0]*0.5)
def test_resize():
    """Each resize flavour is exercised once with a larger shape (just to
    see it run) and once with the identity shape, which must reproduce
    the input up to interpolation error (checkAboutSame)."""
    i2=resizeImageNoInterpolation(image,shape=(image.shape[0]+10,image.shape[1]+10))
    i2=resizeImageNoInterpolation(image,shape=(image.shape[0],image.shape[1]))
    checkAboutSame(i2,image)
    i2=resizeImageCatmullRomInterpolation(image,shape=(image.shape[0]+10,image.shape[1]+10))
    i2=resizeImageCatmullRomInterpolation(image,shape=(image.shape[0],image.shape[1]))
    checkAboutSame(i2,image)
    i2=resizeImageCoscotInterpolation( image,shape=(image.shape[0]+10,image.shape[1]+10))
    i2=resizeImageCoscotInterpolation( image,shape=(image.shape[0],image.shape[1]))
    checkAboutSame(i2,image)
    i2=resizeImageLinearInterpolation( image,shape=(image.shape[0]+10,image.shape[1]+10))
    i2=resizeImageLinearInterpolation( image,shape=(image.shape[0],image.shape[1]))
    checkAboutSame(i2,image)
    i2=resizeImageSplineInterpolation(
        image,shape=(image.shape[0]+10,image.shape[1]+10),order=4)
    i2=resizeImageSplineInterpolation(
        image,shape=(image.shape[0],image.shape[1]), order=4)
    checkAboutSame(i2,image)
def test_2DMorphology():
    """Check grayscale morphology duality on the disc structuring element:
    erode(f) == 255 - dilate(255 - f) and open(f) == 255 - close(255 - f).
    """
    i2=discErosion(image.astype(np.uint8),radius=2)
    i3=(255-discDilation((256-image).astype(np.uint8),radius=2))
    checkImages(i2,i3)
    i2=discOpening(image.astype(np.uint8),radius=2)
    # Bug fix: the morphological dual of opening is *closing*, not
    # dilation (the 3-D analogue below already pairs Opening/Closing).
    i3=(255-discClosing((256-image).astype(np.uint8),radius=2))
    checkImages(i2,i3)
def test_3DMorphologyBinary():
    """Binary closing/opening duality on a random binary volume:
    close(v) == complement(open(complement(v))).
    """
    # Removed a dead `i3=0` assignment that was immediately overwritten.
    i2=multiBinaryClosing(volumeBin,radius=2)
    i3=multiBinaryOpening(volumeBin==False,radius=2)
    i3=i3==False
    checkImages(i2,i3)
def test_3DMorphologyGrayscale():
    """Grayscale morphology duality in 3-D: erosion vs. dilation and
    opening vs. closing on the complemented volume."""
    i2=multiGrayscaleErosion(volume256,sigma=2)
    i3=(256-multiGrayscaleDilation(256-volume256,sigma=2))
    checkImages(i2,i3)
    i2=multiGrayscaleOpening(volume256,sigma=2)
    i3=(256-multiGrayscaleClosing(256-volume256,sigma=2))
    checkImages(i2,i3)
def test_Noise():
    """Smoke test: build a synthetic noisy image and verify the noise
    estimation/normalization entry points run without raising."""
    # ATM, we only test that these things run
    # Local `image` deliberately shadows the module-level fixture.
    image = RGBImage((100,100))
    image[:50,:50] = (10, 10, 10)
    image[50:,:50] = (20, 20, 20)
    image[50:,50:] = (30, 30, 30)
    image += np.random.normal(size=(100,100, 3))
    noiseVarianceEstimation(image.bindAxis('c', 0))
    noiseVarianceClustering(image.bindAxis('c', 0))
    nonparametricNoiseNormalization(image)
    quadraticNoiseNormalizationEstimated(image)
    linearNoiseNormalizationEstimated(image)
    quadraticNoiseNormalization(image,1.0,1.0,1.0)
    linearNoiseNormalization(image,1.0,1.0)
def test_Kernel1D():
    """Kernel1D round-trip: values set via initExplicitly are readable by
    (negative-to-positive) index, and single entries can be reassigned."""
    # we just test functions that were not directly imported
    contents = np.array([-1,2,3,5,3,2,-1], dtype=np.float64)
    k1 = Kernel1D()
    k1.initExplicitly(-3,3, contents)
    for k in xrange(-3, 4):
        # Kernel indices run -3..3; contents is indexed 0..6.
        assert(k1[k]==contents[k+3])
    k1[-2]=5
    assert(k1[-2]==5)
def test_Kernel2D():
    """Kernel2D round-trip: initExplicitly over the (-1,-1)..(1,1) window
    is readable by 2-D index, and single entries can be reassigned."""
    contents = np.array([[0,1,2],[3,4,5],[6,7,8]],dtype=np.float64)
    k2=Kernel2D()
    k2.initExplicitly((-1,-1),(1,1), contents)
    for i in xrange(-1, 2):
        for j in xrange(-1, 2):
            # Kernel indices run -1..1; contents is indexed 0..2.
            assert(k2[i,j]==contents[i+1, j+1])
    k2[0,-1]=-5
    assert(k2[0,-1]==-5)
|
xingwu1/autorest | refs/heads/master | AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/BodyComplex/autorestcomplextestservice/models/date_wrapper.py | 8 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DateWrapper(Model):
    """DateWrapper.

    Auto-generated msrest model wrapping two optional date values.

    :param field:
    :type field: date
    :param leap:
    :type leap: date
    """

    # Maps each attribute to its wire key and msrest (de)serialization type.
    _attribute_map = {
        'field': {'key': 'field', 'type': 'date'},
        'leap': {'key': 'leap', 'type': 'date'},
    }

    def __init__(self, field=None, leap=None):
        self.field = field
        self.leap = leap
|
nico01f/nifo.github.io | refs/heads/master | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/pygments/scanner.py | 365 | # -*- coding: utf-8 -*-
"""
pygments.scanner
~~~~~~~~~~~~~~~~
This library implements a regex based scanner. Some languages
like Pascal are easy to parse but have some keywords that
depend on the context. Because of this it's impossible to lex
that just by using a regular expression lexer like the
`RegexLexer`.
Have a look at the `DelphiLexer` to get an idea of how to use
this scanner.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
class EndOfText(RuntimeError):
    """
    Raised when a match function is called after the scanner has
    already consumed the end of its input text.
    """
class Scanner(object):
    """
    Simple regex-based scanner.

    All method patterns are regular expression strings (not
    compiled expressions!); compiled forms are cached per instance.
    """

    def __init__(self, text, flags=0):
        """
        :param text: The text which should be scanned
        :param flags: default regular expression flags
        """
        self.data = text
        self.data_length = len(text)
        self.start_pos = 0          # position where the last match started
        self.pos = 0                # current scan position (end of last match)
        self.flags = flags
        self.last = None            # text matched by the previous scan
        self.match = None           # text matched by the most recent scan
        self._re_cache = {}         # pattern string -> compiled regex

    # Bug fix: this was `eos = property(eos, eos.__doc__)`, which passed the
    # docstring as property's *fset* argument (property(fget, fset, fdel, doc)),
    # making any assignment to `eos` try to call a string.
    @property
    def eos(self):
        """`True` if the scanner reached the end of text."""
        return self.pos >= self.data_length

    def _compiled(self, pattern):
        """Return a compiled regex for *pattern*, using the per-instance cache."""
        if pattern not in self._re_cache:
            self._re_cache[pattern] = re.compile(pattern, self.flags)
        return self._re_cache[pattern]

    def check(self, pattern):
        """
        Apply `pattern` on the current position and return
        the match object. (Doesn't touch pos). Use this for
        lookahead.  Raises `EndOfText` when the input is exhausted.
        """
        if self.eos:
            raise EndOfText()
        return self._compiled(pattern).match(self.data, self.pos)

    def test(self, pattern):
        """Apply a pattern on the current position and check
        if it matches. Doesn't touch pos."""
        return self.check(pattern) is not None

    def scan(self, pattern):
        """
        Scan the text for the given pattern and update pos/match
        and related fields. The return value is a boolean that
        indicates if the pattern matched. The matched value is
        stored on the instance as ``match``, the last value is
        stored as ``last``. ``start_pos`` is the position of the
        pointer before the pattern was matched, ``pos`` is the
        end position.
        """
        if self.eos:
            raise EndOfText()
        # `last` is rotated even when the pattern fails to match,
        # mirroring the original behavior.
        self.last = self.match
        m = self._compiled(pattern).match(self.data, self.pos)
        if m is None:
            return False
        self.start_pos = m.start()
        self.pos = m.end()
        self.match = m.group()
        return True

    def get_char(self):
        """Scan exactly one character (any character, via the '.' pattern)."""
        self.scan('.')

    def __repr__(self):
        return '<%s %d/%d>' % (
            self.__class__.__name__,
            self.pos,
            self.data_length
        )
|
2014c2g9/c2g9 | refs/heads/master | exts/w2/static/Brython2.0.0-20140209-164925/Lib/xml/dom/minidom.py | 727 | """Simple implementation of the Level 1 DOM.
Namespaces and other minor Level 2 features are also supported.
parse("foo.xml")
parseString("<foo><bar/></foo>")
Todo:
=====
* convenience methods for getting elements and text.
* more testing
* bring some of the writer and linearizer code into conformance with this
interface
* SAX 2 namespaces
"""
import io
import xml.dom
from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE, domreg
from xml.dom.minicompat import *
from xml.dom.xmlbuilder import DOMImplementationLS, DocumentLS
# This is used by the ID-cache invalidation checks; the list isn't
# actually complete, since the nodes being checked will never be the
# DOCUMENT_NODE or DOCUMENT_FRAGMENT_NODE. (The node being checked is
# the node being added or removed, not the node being modified.)
#
_nodeTypes_with_children = (xml.dom.Node.ELEMENT_NODE,
xml.dom.Node.ENTITY_REFERENCE_NODE)
class Node(xml.dom.Node):
    """Base class for all DOM nodes; implements the tree bookkeeping
    (parent/child/sibling links) shared by every concrete node type."""
    namespaceURI = None # this is non-null only for elements and attributes
    parentNode = None
    ownerDocument = None
    nextSibling = None
    previousSibling = None
    prefix = EMPTY_PREFIX # non-null only for NS elements and attributes

    def __bool__(self):
        # A node is always truthy, even when it has no children.
        return True

    def toxml(self, encoding=None):
        # Serialization without pretty-printing: empty indent and newline.
        return self.toprettyxml("", "", encoding)

    def toprettyxml(self, indent="\t", newl="\n", encoding=None):
        # With an explicit encoding the result is bytes; otherwise str.
        if encoding is None:
            writer = io.StringIO()
        else:
            writer = io.TextIOWrapper(io.BytesIO(),
                                      encoding=encoding,
                                      errors="xmlcharrefreplace",
                                      newline='\n')
        if self.nodeType == Node.DOCUMENT_NODE:
            # Can pass encoding only to document, to put it into XML header
            self.writexml(writer, "", indent, newl, encoding)
        else:
            self.writexml(writer, "", indent, newl)
        if encoding is None:
            return writer.getvalue()
        else:
            return writer.detach().getvalue()

    def hasChildNodes(self):
        return bool(self.childNodes)

    def _get_childNodes(self):
        return self.childNodes

    def _get_firstChild(self):
        # Implicitly returns None for childless nodes.
        if self.childNodes:
            return self.childNodes[0]

    def _get_lastChild(self):
        if self.childNodes:
            return self.childNodes[-1]

    def insertBefore(self, newChild, refChild):
        # Inserting a fragment inserts its children one by one.
        if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
            for c in tuple(newChild.childNodes):
                self.insertBefore(c, refChild)
            ### The DOM does not clearly specify what to return in this case
            return newChild
        if newChild.nodeType not in self._child_node_types:
            raise xml.dom.HierarchyRequestErr(
                "%s cannot be child of %s" % (repr(newChild), repr(self)))
        if newChild.parentNode is not None:
            # Reparenting: detach from the previous parent first.
            newChild.parentNode.removeChild(newChild)
        if refChild is None:
            self.appendChild(newChild)
        else:
            try:
                index = self.childNodes.index(refChild)
            except ValueError:
                raise xml.dom.NotFoundErr()
            if newChild.nodeType in _nodeTypes_with_children:
                _clear_id_cache(self)
            self.childNodes.insert(index, newChild)
            newChild.nextSibling = refChild
            refChild.previousSibling = newChild
            if index:
                node = self.childNodes[index-1]
                node.nextSibling = newChild
                newChild.previousSibling = node
            else:
                newChild.previousSibling = None
            newChild.parentNode = self
        return newChild

    def appendChild(self, node):
        # Appending a fragment appends its children one by one.
        if node.nodeType == self.DOCUMENT_FRAGMENT_NODE:
            for c in tuple(node.childNodes):
                self.appendChild(c)
            ### The DOM does not clearly specify what to return in this case
            return node
        if node.nodeType not in self._child_node_types:
            raise xml.dom.HierarchyRequestErr(
                "%s cannot be child of %s" % (repr(node), repr(self)))
        elif node.nodeType in _nodeTypes_with_children:
            _clear_id_cache(self)
        if node.parentNode is not None:
            node.parentNode.removeChild(node)
        _append_child(self, node)
        node.nextSibling = None
        return node

    def replaceChild(self, newChild, oldChild):
        if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
            refChild = oldChild.nextSibling
            self.removeChild(oldChild)
            return self.insertBefore(newChild, refChild)
        if newChild.nodeType not in self._child_node_types:
            raise xml.dom.HierarchyRequestErr(
                "%s cannot be child of %s" % (repr(newChild), repr(self)))
        if newChild is oldChild:
            # Replacing a node with itself is a no-op.
            return
        if newChild.parentNode is not None:
            newChild.parentNode.removeChild(newChild)
        try:
            index = self.childNodes.index(oldChild)
        except ValueError:
            raise xml.dom.NotFoundErr()
        self.childNodes[index] = newChild
        newChild.parentNode = self
        oldChild.parentNode = None
        if (newChild.nodeType in _nodeTypes_with_children
                or oldChild.nodeType in _nodeTypes_with_children):
            _clear_id_cache(self)
        # Splice newChild into oldChild's place in the sibling chain.
        newChild.nextSibling = oldChild.nextSibling
        newChild.previousSibling = oldChild.previousSibling
        oldChild.nextSibling = None
        oldChild.previousSibling = None
        if newChild.previousSibling:
            newChild.previousSibling.nextSibling = newChild
        if newChild.nextSibling:
            newChild.nextSibling.previousSibling = newChild
        return oldChild

    def removeChild(self, oldChild):
        try:
            self.childNodes.remove(oldChild)
        except ValueError:
            raise xml.dom.NotFoundErr()
        # Splice the removed node out of the sibling chain.
        if oldChild.nextSibling is not None:
            oldChild.nextSibling.previousSibling = oldChild.previousSibling
        if oldChild.previousSibling is not None:
            oldChild.previousSibling.nextSibling = oldChild.nextSibling
        oldChild.nextSibling = oldChild.previousSibling = None
        if oldChild.nodeType in _nodeTypes_with_children:
            _clear_id_cache(self)
        oldChild.parentNode = None
        return oldChild

    def normalize(self):
        # Merge adjacent text nodes and drop empty ones, recursing into
        # element children; L accumulates the surviving child list.
        L = []
        for child in self.childNodes:
            if child.nodeType == Node.TEXT_NODE:
                if not child.data:
                    # empty text node; discard
                    if L:
                        L[-1].nextSibling = child.nextSibling
                    if child.nextSibling:
                        child.nextSibling.previousSibling = child.previousSibling
                    child.unlink()
                elif L and L[-1].nodeType == child.nodeType:
                    # collapse text node
                    node = L[-1]
                    node.data = node.data + child.data
                    node.nextSibling = child.nextSibling
                    if child.nextSibling:
                        child.nextSibling.previousSibling = node
                    child.unlink()
                else:
                    L.append(child)
            else:
                L.append(child)
                if child.nodeType == Node.ELEMENT_NODE:
                    child.normalize()
        self.childNodes[:] = L

    def cloneNode(self, deep):
        return _clone_node(self, deep, self.ownerDocument or self)

    def isSupported(self, feature, version):
        return self.ownerDocument.implementation.hasFeature(feature, version)

    def _get_localName(self):
        # Overridden in Element and Attr where localName can be Non-Null
        return None

    # Node interfaces from Level 3 (WD 9 April 2002)

    def isSameNode(self, other):
        return self is other

    def getInterface(self, feature):
        if self.isSupported(feature, None):
            return self
        else:
            return None

    # The "user data" functions use a dictionary that is only present
    # if some user data has been set, so be careful not to assume it
    # exists.

    def getUserData(self, key):
        try:
            return self._user_data[key][0]
        except (AttributeError, KeyError):
            return None

    def setUserData(self, key, data, handler):
        # Returns the previously stored value for key, or None.
        old = None
        try:
            d = self._user_data
        except AttributeError:
            d = {}
            self._user_data = d
        if key in d:
            old = d[key][0]
        if data is None:
            # ignore handlers passed for None
            handler = None
            if old is not None:
                del d[key]
        else:
            d[key] = (data, handler)
        return old

    def _call_user_data_handler(self, operation, src, dst):
        if hasattr(self, "_user_data"):
            for key, (data, handler) in list(self._user_data.items()):
                if handler is not None:
                    handler.handle(operation, key, data, src, dst)

    # minidom-specific API:

    def unlink(self):
        # Break parent/child/sibling reference cycles to help GC.
        self.parentNode = self.ownerDocument = None
        if self.childNodes:
            for child in self.childNodes:
                child.unlink()
            self.childNodes = NodeList()
        self.previousSibling = None
        self.nextSibling = None

    # A Node is its own context manager, to ensure that an unlink() call occurs.
    # This is similar to how a file object works.
    def __enter__(self):
        return self

    def __exit__(self, et, ev, tb):
        self.unlink()
# Read-only convenience properties backed by the Node._get_* accessors above.
defproperty(Node, "firstChild", doc="First child node, or None.")
defproperty(Node, "lastChild", doc="Last child node, or None.")
defproperty(Node, "localName", doc="Namespace-local name of this node.")
def _append_child(self, node):
# fast path with less checks; usable by DOM builders if careful
childNodes = self.childNodes
if childNodes:
last = childNodes[-1]
node.previousSibling = last
last.nextSibling = node
childNodes.append(node)
node.parentNode = self
def _in_document(node):
    """Return True iff *node* is part of a document tree, i.e. some
    ancestor (possibly the node itself) is a DOCUMENT_NODE."""
    ancestor = node
    while ancestor is not None:
        if ancestor.nodeType == Node.DOCUMENT_NODE:
            return True
        ancestor = ancestor.parentNode
    return False
def _write_data(writer, data):
"Writes datachars to writer."
if data:
data = data.replace("&", "&").replace("<", "<"). \
replace("\"", """).replace(">", ">")
writer.write(data)
def _get_elements_by_tagName_helper(parent, name, rc):
    """Depth-first collect descendant elements of *parent* whose tagName
    equals *name* ("*" matches every element) into the list *rc*."""
    for child in parent.childNodes:
        if child.nodeType == Node.ELEMENT_NODE and \
           (name == "*" or child.tagName == name):
            rc.append(child)
        _get_elements_by_tagName_helper(child, name, rc)
    return rc
def _get_elements_by_tagName_ns_helper(parent, nsURI, localName, rc):
    """Depth-first collect descendant elements matching *localName* and
    *nsURI* ("*" is a wildcard for either) into the list *rc*."""
    for child in parent.childNodes:
        if child.nodeType != Node.ELEMENT_NODE:
            continue
        name_ok = localName == "*" or child.localName == localName
        ns_ok = nsURI == "*" or child.namespaceURI == nsURI
        if name_ok and ns_ok:
            rc.append(child)
        _get_elements_by_tagName_ns_helper(child, nsURI, localName, rc)
    return rc
class DocumentFragment(Node):
    """Lightweight container node; inserting a fragment into a tree
    inserts its children instead of the fragment itself."""
    nodeType = Node.DOCUMENT_FRAGMENT_NODE
    nodeName = "#document-fragment"
    nodeValue = None
    attributes = None
    parentNode = None
    _child_node_types = (Node.ELEMENT_NODE,
                         Node.TEXT_NODE,
                         Node.CDATA_SECTION_NODE,
                         Node.ENTITY_REFERENCE_NODE,
                         Node.PROCESSING_INSTRUCTION_NODE,
                         Node.COMMENT_NODE,
                         Node.NOTATION_NODE)

    def __init__(self):
        self.childNodes = NodeList()
class Attr(Node):
    """DOM attribute node.  The attribute's value is mirrored in a single
    Text child node, kept in sync by the `value` property setter."""
    __slots__ = ('_name', '_value', 'namespaceURI',
                 '_prefix', 'childNodes', '_localName', 'ownerDocument',
                 'ownerElement')
    nodeType = Node.ATTRIBUTE_NODE
    attributes = None
    specified = False
    _is_id = False

    _child_node_types = (Node.TEXT_NODE, Node.ENTITY_REFERENCE_NODE)

    def __init__(self, qName, namespaceURI=EMPTY_NAMESPACE, localName=None,
                 prefix=None):
        self.ownerElement = None
        self._name = qName
        self.namespaceURI = namespaceURI
        self._prefix = prefix
        self.childNodes = NodeList()

        # Add the single child node that represents the value of the attr
        self.childNodes.append(Text())

        # nodeValue and value are set elsewhere

    def _get_localName(self):
        try:
            return self._localName
        except AttributeError:
            # Fall back to the part of the qualified name after the prefix.
            return self.nodeName.split(":", 1)[-1]

    def _get_specified(self):
        return self.specified

    # Bug fix: a second, dead `_get_name` returning `self.name` used to
    # precede this one; the later definition always shadowed it, so the
    # duplicate has been removed.
    def _get_name(self):
        return self._name

    def _set_name(self, value):
        self._name = value
        if self.ownerElement is not None:
            # Renaming can invalidate the owning document's ID cache.
            _clear_id_cache(self.ownerElement)

    nodeName = name = property(_get_name, _set_name)

    def _get_value(self):
        return self._value

    def _set_value(self, value):
        # Keep the Text child in sync with the attribute value.
        # (A redundant second `self.childNodes[0].data = value` after the
        # cache clearing has been removed; nothing modifies the child's
        # data in between.)
        self._value = value
        self.childNodes[0].data = value
        if self.ownerElement is not None:
            _clear_id_cache(self.ownerElement)

    nodeValue = value = property(_get_value, _set_value)

    def _get_prefix(self):
        return self._prefix

    def _set_prefix(self, prefix):
        nsuri = self.namespaceURI
        if prefix == "xmlns":
            if nsuri and nsuri != XMLNS_NAMESPACE:
                raise xml.dom.NamespaceErr(
                    "illegal use of 'xmlns' prefix for the wrong namespace")
        self._prefix = prefix
        if prefix is None:
            newName = self.localName
        else:
            newName = "%s:%s" % (prefix, self.localName)
        if self.ownerElement:
            _clear_id_cache(self.ownerElement)
        self.name = newName

    prefix = property(_get_prefix, _set_prefix)

    def unlink(self):
        # This implementation does not call the base implementation
        # since most of that is not needed, and the expense of the
        # method call is not warranted.  We duplicate the removal of
        # children, but that's all we needed from the base class.
        elem = self.ownerElement
        if elem is not None:
            del elem._attrs[self.nodeName]
            del elem._attrsNS[(self.namespaceURI, self.localName)]
            if self._is_id:
                self._is_id = False
                elem._magic_id_nodes -= 1
                self.ownerDocument._magic_id_count -= 1
        for child in self.childNodes:
            child.unlink()
        del self.childNodes[:]

    def _get_isId(self):
        # True when explicitly flagged, or when the document's schema
        # information declares this attribute an ID.
        if self._is_id:
            return True
        doc = self.ownerDocument
        elem = self.ownerElement
        if doc is None or elem is None:
            return False
        info = doc._get_elem_info(elem)
        if info is None:
            return False
        if self.namespaceURI:
            return info.isIdNS(self.namespaceURI, self.localName)
        else:
            return info.isId(self.nodeName)

    def _get_schemaType(self):
        doc = self.ownerDocument
        elem = self.ownerElement
        if doc is None or elem is None:
            return _no_type
        info = doc._get_elem_info(elem)
        if info is None:
            return _no_type
        if self.namespaceURI:
            return info.getAttributeTypeNS(self.namespaceURI, self.localName)
        else:
            return info.getAttributeType(self.nodeName)
# Read-only convenience properties backed by the Attr._get_* accessors above.
defproperty(Attr, "isId", doc="True if this attribute is an ID.")
defproperty(Attr, "localName", doc="Namespace-local name of this attribute.")
defproperty(Attr, "schemaType", doc="Schema type for this attribute.")
class NamedNodeMap(object):
    """The attribute list is a transient interface to the underlying
    dictionaries.  Mutations here will change the underlying element's
    dictionary.

    Ordering is imposed artificially and does not reflect the order of
    attributes as found in an input document.
    """

    __slots__ = ('_attrs', '_attrsNS', '_ownerElement')

    def __init__(self, attrs, attrsNS, ownerElement):
        # _attrs maps qualified name -> Attr; _attrsNS maps
        # (namespaceURI, localName) -> the same Attr objects.
        self._attrs = attrs
        self._attrsNS = attrsNS
        self._ownerElement = ownerElement

    def _get_length(self):
        return len(self._attrs)

    def item(self, index):
        # Index into the (arbitrary) key ordering; None when out of range.
        try:
            return self[list(self._attrs.keys())[index]]
        except IndexError:
            return None

    def items(self):
        L = []
        for node in self._attrs.values():
            L.append((node.nodeName, node.value))
        return L

    def itemsNS(self):
        L = []
        for node in self._attrs.values():
            L.append(((node.namespaceURI, node.localName), node.value))
        return L

    def __contains__(self, key):
        # A string key is a qualified name; a tuple is (namespace, localName).
        if isinstance(key, str):
            return key in self._attrs
        else:
            return key in self._attrsNS

    def keys(self):
        return self._attrs.keys()

    def keysNS(self):
        return self._attrsNS.keys()

    def values(self):
        return self._attrs.values()

    def get(self, name, value=None):
        return self._attrs.get(name, value)

    __len__ = _get_length

    def _cmp(self, other):
        # Maps compare equal when they share the same underlying dict;
        # otherwise they order arbitrarily (but consistently) by id().
        if self._attrs is getattr(other, "_attrs", None):
            return 0
        else:
            return (id(self) > id(other)) - (id(self) < id(other))

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __ne__(self, other):
        return self._cmp(other) != 0

    def __getitem__(self, attname_or_tuple):
        # Tuple key -> namespace lookup; string key -> qualified-name lookup.
        if isinstance(attname_or_tuple, tuple):
            return self._attrsNS[attname_or_tuple]
        else:
            return self._attrs[attname_or_tuple]

    # same as set
    def __setitem__(self, attname, value):
        if isinstance(value, str):
            try:
                node = self._attrs[attname]
            except KeyError:
                # Create a fresh Attr for an unknown name.
                node = Attr(attname)
                node.ownerDocument = self._ownerElement.ownerDocument
                self.setNamedItem(node)
            node.value = value
        else:
            if not isinstance(value, Attr):
                raise TypeError("value must be a string or Attr object")
            node = value
            self.setNamedItem(node)

    def getNamedItem(self, name):
        try:
            return self._attrs[name]
        except KeyError:
            return None

    def getNamedItemNS(self, namespaceURI, localName):
        try:
            return self._attrsNS[(namespaceURI, localName)]
        except KeyError:
            return None

    def removeNamedItem(self, name):
        n = self.getNamedItem(name)
        if n is not None:
            _clear_id_cache(self._ownerElement)
            # Remove from both indexes to keep them consistent.
            del self._attrs[n.nodeName]
            del self._attrsNS[(n.namespaceURI, n.localName)]
            if hasattr(n, 'ownerElement'):
                n.ownerElement = None
            return n
        else:
            raise xml.dom.NotFoundErr()

    def removeNamedItemNS(self, namespaceURI, localName):
        n = self.getNamedItemNS(namespaceURI, localName)
        if n is not None:
            _clear_id_cache(self._ownerElement)
            del self._attrsNS[(n.namespaceURI, n.localName)]
            del self._attrs[n.nodeName]
            if hasattr(n, 'ownerElement'):
                n.ownerElement = None
            return n
        else:
            raise xml.dom.NotFoundErr()

    def setNamedItem(self, node):
        if not isinstance(node, Attr):
            raise xml.dom.HierarchyRequestErr(
                "%s cannot be child of %s" % (repr(node), repr(self)))
        old = self._attrs.get(node.name)
        if old:
            old.unlink()
        self._attrs[node.name] = node
        self._attrsNS[(node.namespaceURI, node.localName)] = node
        node.ownerElement = self._ownerElement
        _clear_id_cache(node.ownerElement)
        return old

    def setNamedItemNS(self, node):
        return self.setNamedItem(node)

    def __delitem__(self, attname_or_tuple):
        node = self[attname_or_tuple]
        _clear_id_cache(node.ownerElement)
        node.unlink()

    def __getstate__(self):
        # Explicit pickle support is required because of __slots__.
        return self._attrs, self._attrsNS, self._ownerElement

    def __setstate__(self, state):
        self._attrs, self._attrsNS, self._ownerElement = state
defproperty(NamedNodeMap, "length",
            doc="Number of nodes in the NamedNodeMap.")

# DOM Level 1 called this class AttributeList; keep the alias for
# backward compatibility.
AttributeList = NamedNodeMap
class TypeInfo(object):
    """Lightweight (namespace, name) pair describing a schema type."""
    __slots__ = 'namespace', 'name'

    def __init__(self, namespace, name):
        self.namespace = namespace
        self.name = name

    def __repr__(self):
        if self.namespace:
            return "<TypeInfo %r (from %r)>" % (self.name, self.namespace)
        return "<TypeInfo %r>" % self.name

    def _get_name(self):
        return self.name

    def _get_namespace(self):
        return self.namespace
_no_type = TypeInfo(None, None)
class Element(Node):
    """A DOM element node.

    Attributes are stored double-indexed: by qualified name in ``_attrs``
    and by ``(namespaceURI, localName)`` in ``_attrsNS``.  Both
    dictionaries are created lazily by :meth:`_ensure_attributes`.
    """
    __slots__=('ownerDocument', 'parentNode', 'tagName', 'nodeName', 'prefix',
            'namespaceURI', '_localName', 'childNodes', '_attrs', '_attrsNS',
            'nextSibling', 'previousSibling')
    nodeType = Node.ELEMENT_NODE
    nodeValue = None
    schemaType = _no_type
    # Number of attributes made into IDs via setIdAttribute*() rather
    # than via DTD information; consulted by Document.getElementById().
    _magic_id_nodes = 0
    _child_node_types = (Node.ELEMENT_NODE,
                         Node.PROCESSING_INSTRUCTION_NODE,
                         Node.COMMENT_NODE,
                         Node.TEXT_NODE,
                         Node.CDATA_SECTION_NODE,
                         Node.ENTITY_REFERENCE_NODE)
    def __init__(self, tagName, namespaceURI=EMPTY_NAMESPACE, prefix=None,
                 localName=None):
        self.parentNode = None
        self.tagName = self.nodeName = tagName
        self.prefix = prefix
        self.namespaceURI = namespaceURI
        self.childNodes = NodeList()
        self.nextSibling = self.previousSibling = None
        # Attribute dictionaries are lazily created
        # attributes are double-indexed:
        #    tagName -> Attribute
        #    URI,localName -> Attribute
        # in the future: consider lazy generation
        # of attribute objects this is too tricky
        # for now because of headaches with
        # namespaces.
        self._attrs = None
        self._attrsNS = None
    def _ensure_attributes(self):
        # Create both attribute indexes on first use.
        if self._attrs is None:
            self._attrs = {}
            self._attrsNS = {}
    def _get_localName(self):
        # Prefer the explicit _localName set by renameNode(); otherwise
        # derive it from the qualified tag name.
        try:
            return self._localName
        except AttributeError:
            return self.tagName.split(":", 1)[-1]
    def _get_tagName(self):
        return self.tagName
    def unlink(self):
        """Break reference cycles, including attr <-> ownerElement cycles."""
        if self._attrs is not None:
            for attr in list(self._attrs.values()):
                attr.unlink()
        self._attrs = None
        self._attrsNS = None
        Node.unlink(self)
    def getAttribute(self, attname):
        # Per the DOM spec, a missing attribute yields "" rather than None.
        if self._attrs is None:
            return ""
        try:
            return self._attrs[attname].value
        except KeyError:
            return ""
    def getAttributeNS(self, namespaceURI, localName):
        # Namespace-aware variant of getAttribute(); "" when absent.
        if self._attrsNS is None:
            return ""
        try:
            return self._attrsNS[(namespaceURI, localName)].value
        except KeyError:
            return ""
    def setAttribute(self, attname, value):
        """Set attribute *attname* to *value*, creating the Attr if needed."""
        attr = self.getAttributeNode(attname)
        if attr is None:
            attr = Attr(attname)
            attr.value = value # also sets nodeValue
            attr.ownerDocument = self.ownerDocument
            self.setAttributeNode(attr)
        elif value != attr.value:
            attr.value = value
            if attr.isId:
                # The value of an ID attribute changed; cached lookups
                # in the owning document are now stale.
                _clear_id_cache(self)
    def setAttributeNS(self, namespaceURI, qualifiedName, value):
        """Namespace-aware variant of setAttribute()."""
        prefix, localname = _nssplit(qualifiedName)
        attr = self.getAttributeNodeNS(namespaceURI, localname)
        if attr is None:
            attr = Attr(qualifiedName, namespaceURI, localname, prefix)
            attr.value = value
            attr.ownerDocument = self.ownerDocument
            self.setAttributeNode(attr)
        else:
            if value != attr.value:
                attr.value = value
                if attr.isId:
                    _clear_id_cache(self)
            if attr.prefix != prefix:
                # Re-prefixing keeps the namespace binding intact but
                # changes the serialized qualified name.
                attr.prefix = prefix
                attr.nodeName = qualifiedName
    def getAttributeNode(self, attrname):
        if self._attrs is None:
            return None
        return self._attrs.get(attrname)
    def getAttributeNodeNS(self, namespaceURI, localName):
        if self._attrsNS is None:
            return None
        return self._attrsNS.get((namespaceURI, localName))
    def setAttributeNode(self, attr):
        """Add an Attr node, replacing (and returning) any displaced one."""
        if attr.ownerElement not in (None, self):
            raise xml.dom.InuseAttributeErr("attribute node already owned")
        self._ensure_attributes()
        # An incoming attribute may displace a node in either index
        # (same qualified name, or same namespace/localName pair).
        old1 = self._attrs.get(attr.name, None)
        if old1 is not None:
            self.removeAttributeNode(old1)
        old2 = self._attrsNS.get((attr.namespaceURI, attr.localName), None)
        if old2 is not None and old2 is not old1:
            self.removeAttributeNode(old2)
        _set_attribute_node(self, attr)
        if old1 is not attr:
            # It might have already been part of this node, in which case
            # it doesn't represent a change, and should not be returned.
            return old1
        if old2 is not attr:
            return old2
    setAttributeNodeNS = setAttributeNode
    def removeAttribute(self, name):
        """Remove the named attribute; NotFoundErr if it does not exist."""
        if self._attrsNS is None:
            raise xml.dom.NotFoundErr()
        try:
            attr = self._attrs[name]
        except KeyError:
            raise xml.dom.NotFoundErr()
        self.removeAttributeNode(attr)
    def removeAttributeNS(self, namespaceURI, localName):
        """Namespace-aware variant of removeAttribute()."""
        if self._attrsNS is None:
            raise xml.dom.NotFoundErr()
        try:
            attr = self._attrsNS[(namespaceURI, localName)]
        except KeyError:
            raise xml.dom.NotFoundErr()
        self.removeAttributeNode(attr)
    def removeAttributeNode(self, node):
        """Detach *node* from this element and return nothing.

        NOTE(review): only membership in ``_attrs`` is checked here; the
        actual removal from both indexes happens inside ``node.unlink()``
        via the Attr/ownerElement machinery — confirm when modifying.
        """
        if node is None:
            raise xml.dom.NotFoundErr()
        try:
            self._attrs[node.name]
        except KeyError:
            raise xml.dom.NotFoundErr()
        _clear_id_cache(self)
        node.unlink()
        # Restore this since the node is still useful and otherwise
        # unlinked
        node.ownerDocument = self.ownerDocument
    removeAttributeNodeNS = removeAttributeNode
    def hasAttribute(self, name):
        if self._attrs is None:
            return False
        return name in self._attrs
    def hasAttributeNS(self, namespaceURI, localName):
        if self._attrsNS is None:
            return False
        return (namespaceURI, localName) in self._attrsNS
    def getElementsByTagName(self, name):
        return _get_elements_by_tagName_helper(self, name, NodeList())
    def getElementsByTagNameNS(self, namespaceURI, localName):
        return _get_elements_by_tagName_ns_helper(
            self, namespaceURI, localName, NodeList())
    def __repr__(self):
        return "<DOM Element: %s at %#x>" % (self.tagName, id(self))
    def writexml(self, writer, indent="", addindent="", newl=""):
        """Serialize this element and its subtree to *writer*."""
        # indent = current indentation
        # addindent = indentation to add to higher levels
        # newl = newline string
        writer.write(indent+"<" + self.tagName)
        attrs = self._get_attributes()
        # Attributes are emitted in sorted order for stable output.
        a_names = sorted(attrs.keys())
        for a_name in a_names:
            writer.write(" %s=\"" % a_name)
            _write_data(writer, attrs[a_name].value)
            writer.write("\"")
        if self.childNodes:
            writer.write(">")
            # A lone text child is written inline, without indentation,
            # so that round-tripping does not introduce whitespace.
            if (len(self.childNodes) == 1 and
                self.childNodes[0].nodeType == Node.TEXT_NODE):
                self.childNodes[0].writexml(writer, '', '', '')
            else:
                writer.write(newl)
                for node in self.childNodes:
                    node.writexml(writer, indent+addindent, addindent, newl)
                writer.write(indent)
            writer.write("</%s>%s" % (self.tagName, newl))
        else:
            writer.write("/>%s"%(newl))
    def _get_attributes(self):
        # A fresh proxy view over the live attribute dictionaries.
        self._ensure_attributes()
        return NamedNodeMap(self._attrs, self._attrsNS, self)
    def hasAttributes(self):
        if self._attrs:
            return True
        else:
            return False
    # DOM Level 3 attributes, based on the 22 Oct 2002 draft
    def setIdAttribute(self, name):
        idAttr = self.getAttributeNode(name)
        self.setIdAttributeNode(idAttr)
    def setIdAttributeNS(self, namespaceURI, localName):
        idAttr = self.getAttributeNodeNS(namespaceURI, localName)
        self.setIdAttributeNode(idAttr)
    def setIdAttributeNode(self, idAttr):
        """Mark *idAttr* (which must belong to this element) as an ID."""
        if idAttr is None or not self.isSameNode(idAttr.ownerElement):
            raise xml.dom.NotFoundErr()
        if _get_containing_entref(self) is not None:
            raise xml.dom.NoModificationAllowedErr()
        if not idAttr._is_id:
            idAttr._is_id = True
            self._magic_id_nodes += 1
            self.ownerDocument._magic_id_count += 1
            # Newly declared ID invalidates cached getElementById results.
            _clear_id_cache(self)
# Expose read-only DOM properties backed by the _get_* accessors above.
defproperty(Element, "attributes",
            doc="NamedNodeMap of attributes on the element.")
defproperty(Element, "localName",
            doc="Namespace-local name of this element.")
def _set_attribute_node(element, attr):
    """Register *attr* in both of *element*'s attribute indexes.

    The document's ID cache is invalidated first, since the new
    attribute may itself be (or shadow) an ID.
    """
    _clear_id_cache(element)
    element._ensure_attributes()
    element._attrs[attr.name] = attr
    element._attrsNS[(attr.namespaceURI, attr.localName)] = attr
    # This creates a circular reference; Element.unlink() breaks the
    # cycle by discarding the attribute dictionaries.
    attr.ownerElement = element
class Childless:
    """Mixin that makes childless-ness easy to implement and avoids
    the complexity of the Node methods that deal with children.
    """
    __slots__ = ()

    # Static "no children" values shared by all childless node types.
    attributes = None
    childNodes = EmptyNodeList()
    firstChild = None
    lastChild = None

    def _get_firstChild(self):
        # A childless node never has a first child.
        return None

    def _get_lastChild(self):
        # A childless node never has a last child.
        return None

    def hasChildNodes(self):
        return False

    def normalize(self):
        # For childless nodes, normalize() has nothing to do.
        pass

    # Every mutating child operation is rejected outright.
    def appendChild(self, node):
        raise xml.dom.HierarchyRequestErr(
            self.nodeName + " nodes cannot have children")

    def insertBefore(self, newChild, refChild):
        raise xml.dom.HierarchyRequestErr(
            self.nodeName + " nodes do not have children")

    def removeChild(self, oldChild):
        raise xml.dom.NotFoundErr(
            self.nodeName + " nodes do not have children")

    def replaceChild(self, newChild, oldChild):
        raise xml.dom.HierarchyRequestErr(
            self.nodeName + " nodes do not have children")
class ProcessingInstruction(Childless, Node):
    """A DOM processing instruction node: ``<?target data?>``."""
    nodeType = Node.PROCESSING_INSTRUCTION_NODE
    __slots__ = ('target', 'data')

    def __init__(self, target, data):
        self.target = target
        self.data = data

    # nodeValue is an alias for data
    def _get_nodeValue(self):
        return self.data
    def _set_nodeValue(self, value):
        # Bug fix: this previously assigned the undefined name ``data``
        # (instead of the parameter ``value``), so every assignment to
        # ``pi.nodeValue`` raised NameError.
        self.data = value
    nodeValue = property(_get_nodeValue, _set_nodeValue)

    # nodeName is an alias for target
    def _get_nodeName(self):
        return self.target
    def _set_nodeName(self, value):
        self.target = value
    nodeName = property(_get_nodeName, _set_nodeName)

    def writexml(self, writer, indent="", addindent="", newl=""):
        writer.write("%s<?%s %s?>%s" % (indent,self.target, self.data, newl))
class CharacterData(Childless, Node):
    """Base class for nodes whose content is a flat string (text,
    comments, CDATA); implements the DOM CharacterData interface.
    """
    __slots__=('_data', 'ownerDocument','parentNode', 'previousSibling', 'nextSibling')
    def __init__(self):
        self.ownerDocument = self.parentNode = None
        self.previousSibling = self.nextSibling = None
        self._data = ''
        Node.__init__(self)
    def _get_length(self):
        return len(self.data)
    __len__ = _get_length
    def _get_data(self):
        return self._data
    def _set_data(self, data):
        self._data = data
    # ``data`` and ``nodeValue`` are the same underlying string.
    data = nodeValue = property(_get_data, _set_data)
    def __repr__(self):
        # Show at most the first 10 characters of the content.
        data = self.data
        if len(data) > 10:
            dotdotdot = "..."
        else:
            dotdotdot = ""
        return '<DOM %s node "%r%s">' % (
            self.__class__.__name__, data[0:10], dotdotdot)
    def substringData(self, offset, count):
        """Return up to *count* characters starting at *offset*."""
        if offset < 0:
            raise xml.dom.IndexSizeErr("offset cannot be negative")
        if offset >= len(self.data):
            raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
        if count < 0:
            raise xml.dom.IndexSizeErr("count cannot be negative")
        return self.data[offset:offset+count]
    def appendData(self, arg):
        self.data = self.data + arg
    def insertData(self, offset, arg):
        """Insert *arg* into the content at character position *offset*."""
        if offset < 0:
            raise xml.dom.IndexSizeErr("offset cannot be negative")
        if offset >= len(self.data):
            raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
        if arg:
            self.data = "%s%s%s" % (
                self.data[:offset], arg, self.data[offset:])
    def deleteData(self, offset, count):
        """Delete *count* characters starting at *offset*."""
        if offset < 0:
            raise xml.dom.IndexSizeErr("offset cannot be negative")
        if offset >= len(self.data):
            raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
        if count < 0:
            raise xml.dom.IndexSizeErr("count cannot be negative")
        if count:
            self.data = self.data[:offset] + self.data[offset+count:]
    def replaceData(self, offset, count, arg):
        """Replace *count* characters starting at *offset* with *arg*."""
        if offset < 0:
            raise xml.dom.IndexSizeErr("offset cannot be negative")
        if offset >= len(self.data):
            raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
        if count < 0:
            raise xml.dom.IndexSizeErr("count cannot be negative")
        if count:
            self.data = "%s%s%s" % (
                self.data[:offset], arg, self.data[offset+count:])
defproperty(CharacterData, "length", doc="Length of the string data.")
class Text(CharacterData):
    """A DOM text node."""
    __slots__ = ()
    nodeType = Node.TEXT_NODE
    nodeName = "#text"
    attributes = None
    def splitText(self, offset):
        """Split this node at *offset*; the tail becomes a new sibling
        Text node, which is returned."""
        if offset < 0 or offset > len(self.data):
            raise xml.dom.IndexSizeErr("illegal offset value")
        newText = self.__class__()
        newText.data = self.data[offset:]
        newText.ownerDocument = self.ownerDocument
        next = self.nextSibling
        # Only link the new node into the tree if this node is attached.
        if self.parentNode and self in self.parentNode.childNodes:
            if next is None:
                self.parentNode.appendChild(newText)
            else:
                self.parentNode.insertBefore(newText, next)
        self.data = self.data[:offset]
        return newText
    def writexml(self, writer, indent="", addindent="", newl=""):
        _write_data(writer, "%s%s%s" % (indent, self.data, newl))
    # DOM Level 3 (WD 9 April 2002)
    def _get_wholeText(self):
        # Concatenate this node's data with all logically-adjacent
        # text/CDATA siblings on both sides.
        L = [self.data]
        n = self.previousSibling
        while n is not None:
            if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
                L.insert(0, n.data)
                n = n.previousSibling
            else:
                break
        n = self.nextSibling
        while n is not None:
            if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
                L.append(n.data)
                n = n.nextSibling
            else:
                break
        return ''.join(L)
    def replaceWholeText(self, content):
        """Replace this node and all adjacent text/CDATA siblings with a
        single node holding *content*; returns that node, or None when
        *content* is empty."""
        # XXX This needs to be seriously changed if minidom ever
        # supports EntityReference nodes.
        parent = self.parentNode
        n = self.previousSibling
        while n is not None:
            if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
                next = n.previousSibling
                parent.removeChild(n)
                n = next
            else:
                break
        n = self.nextSibling
        if not content:
            parent.removeChild(self)
        while n is not None:
            if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
                next = n.nextSibling
                parent.removeChild(n)
                n = next
            else:
                break
        if content:
            self.data = content
            return self
        else:
            return None
    def _get_isWhitespaceInElementContent(self):
        # True only when this node is all-whitespace AND the containing
        # element is declared (via schema info) to have element content.
        if self.data.strip():
            return False
        elem = _get_containing_element(self)
        if elem is None:
            return False
        info = self.ownerDocument._get_elem_info(elem)
        if info is None:
            return False
        else:
            return info.isElementContent()
# Read-only DOM Level 3 properties for Text nodes.
defproperty(Text, "isWhitespaceInElementContent",
            doc="True iff this text node contains only whitespace"
                " and is in element content.")
defproperty(Text, "wholeText",
            doc="The text of all logically-adjacent text nodes.")
def _get_containing_element(node):
    """Return the nearest Element ancestor of *node*, or None."""
    ancestor = node.parentNode
    while ancestor is not None:
        if ancestor.nodeType == Node.ELEMENT_NODE:
            return ancestor
        ancestor = ancestor.parentNode
    return None
def _get_containing_entref(node):
    """Return the nearest EntityReference ancestor of *node*, or None."""
    ancestor = node.parentNode
    while ancestor is not None:
        if ancestor.nodeType == Node.ENTITY_REFERENCE_NODE:
            return ancestor
        ancestor = ancestor.parentNode
    return None
class Comment(CharacterData):
    """A DOM comment node: ``<!--data-->``."""
    nodeType = Node.COMMENT_NODE
    nodeName = "#comment"

    def __init__(self, data):
        CharacterData.__init__(self)
        self._data = data

    def writexml(self, writer, indent="", addindent="", newl=""):
        # A literal "--" inside a comment would make the output ill-formed.
        if "--" in self.data:
            raise ValueError("'--' is not allowed in a comment node")
        writer.write("%s<!--%s-->%s" % (indent, self.data, newl))
class CDATASection(Text):
    """A DOM CDATA section node: ``<![CDATA[data]]>``."""
    __slots__ = ()
    nodeType = Node.CDATA_SECTION_NODE
    nodeName = "#cdata-section"

    def writexml(self, writer, indent="", addindent="", newl=""):
        # "]]>" terminates a CDATA section, so it cannot appear inside one.
        if "]]>" in self.data:
            raise ValueError("']]>' not allowed in a CDATA section")
        writer.write("<![CDATA[%s]]>" % self.data)
class ReadOnlySequentialNamedNodeMap(object):
    """Immutable NamedNodeMap backed by a plain sequence of nodes.

    Used for DTD entities and notations, which cannot be modified
    through the DOM.
    """
    __slots__ = '_seq',

    def __init__(self, seq=()):
        # seq should be a list or tuple
        self._seq = seq

    def __len__(self):
        return len(self._seq)

    def _get_length(self):
        return len(self._seq)

    def getNamedItem(self, name):
        # Linear scan; None when no node has this nodeName.
        return next((n for n in self._seq if n.nodeName == name), None)

    def getNamedItemNS(self, namespaceURI, localName):
        key = (namespaceURI, localName)
        return next(
            (n for n in self._seq
             if (n.namespaceURI, n.localName) == key),
            None)

    def __getitem__(self, name_or_tuple):
        # A tuple key means (namespaceURI, localName); anything else is
        # treated as a qualified name.
        if isinstance(name_or_tuple, tuple):
            node = self.getNamedItemNS(*name_or_tuple)
        else:
            node = self.getNamedItem(name_or_tuple)
        if node is None:
            raise KeyError(name_or_tuple)
        return node

    def item(self, index):
        # Per the DOM spec, out-of-range (including negative) -> None.
        if 0 <= index < len(self._seq):
            return self._seq[index]
        return None

    # All mutators are rejected: this map is read-only by contract.
    def removeNamedItem(self, name):
        raise xml.dom.NoModificationAllowedErr(
            "NamedNodeMap instance is read-only")

    def removeNamedItemNS(self, namespaceURI, localName):
        raise xml.dom.NoModificationAllowedErr(
            "NamedNodeMap instance is read-only")

    def setNamedItem(self, node):
        raise xml.dom.NoModificationAllowedErr(
            "NamedNodeMap instance is read-only")

    def setNamedItemNS(self, node):
        raise xml.dom.NoModificationAllowedErr(
            "NamedNodeMap instance is read-only")

    # Pickle support: the sequence is the only state.
    def __getstate__(self):
        return [self._seq]

    def __setstate__(self, state):
        self._seq = state[0]
# Read-only DOM ``length`` property for the read-only map above.
defproperty(ReadOnlySequentialNamedNodeMap, "length",
            doc="Number of entries in the NamedNodeMap.")
class Identified:
    """Mix-in class that supports the publicId and systemId attributes."""
    __slots__ = 'publicId', 'systemId'

    def _identified_mixin_init(self, publicId, systemId):
        # Shared initializer called by Entity/Notation/DocumentType.
        self.publicId = publicId
        self.systemId = systemId

    def _get_publicId(self):
        return self.publicId

    def _get_systemId(self):
        return self.systemId
class DocumentType(Identified, Childless, Node):
    """A DOM document type (``<!DOCTYPE ...>``) node."""
    nodeType = Node.DOCUMENT_TYPE_NODE
    nodeValue = None
    name = None
    publicId = None
    systemId = None
    internalSubset = None
    def __init__(self, qualifiedName):
        self.entities = ReadOnlySequentialNamedNodeMap()
        self.notations = ReadOnlySequentialNamedNodeMap()
        if qualifiedName:
            prefix, localname = _nssplit(qualifiedName)
            self.name = localname
        self.nodeName = self.name
    def _get_internalSubset(self):
        return self.internalSubset
    def cloneNode(self, deep):
        """Clone this doctype; only unowned doctypes may be cloned."""
        if self.ownerDocument is None:
            # it's ok
            clone = DocumentType(None)
            clone.name = self.name
            clone.nodeName = self.name
            operation = xml.dom.UserDataHandler.NODE_CLONED
            if deep:
                clone.entities._seq = []
                clone.notations._seq = []
                for n in self.notations._seq:
                    notation = Notation(n.nodeName, n.publicId, n.systemId)
                    clone.notations._seq.append(notation)
                    n._call_user_data_handler(operation, n, notation)
                for e in self.entities._seq:
                    entity = Entity(e.nodeName, e.publicId, e.systemId,
                                    e.notationName)
                    entity.actualEncoding = e.actualEncoding
                    entity.encoding = e.encoding
                    entity.version = e.version
                    clone.entities._seq.append(entity)
                    # NOTE(review): ``n`` here is the last notation from
                    # the loop above (or unbound if there were none);
                    # per UserDataHandler semantics the src argument
                    # looks like it should be ``e`` — confirm.
                    e._call_user_data_handler(operation, n, entity)
            self._call_user_data_handler(operation, self, clone)
            return clone
        else:
            return None
    def writexml(self, writer, indent="", addindent="", newl=""):
        """Serialize the DOCTYPE declaration to *writer*."""
        writer.write("<!DOCTYPE ")
        writer.write(self.name)
        if self.publicId:
            writer.write("%s  PUBLIC '%s'%s  '%s'"
                         % (newl, self.publicId, newl, self.systemId))
        elif self.systemId:
            writer.write("%s  SYSTEM '%s'" % (newl, self.systemId))
        if self.internalSubset is not None:
            writer.write(" [")
            writer.write(self.internalSubset)
            writer.write("]")
        writer.write(">"+newl)
class Entity(Identified, Node):
    """An entity declared in the document's DTD.

    Entities expose no mutable children through this implementation.
    """
    attributes = None
    nodeType = Node.ENTITY_NODE
    nodeValue = None

    actualEncoding = None
    encoding = None
    version = None

    def __init__(self, name, publicId, systemId, notation):
        self.nodeName = name
        self.notationName = notation
        self.childNodes = NodeList()
        self._identified_mixin_init(publicId, systemId)

    def _get_actualEncoding(self):
        return self.actualEncoding

    def _get_encoding(self):
        return self.encoding

    def _get_version(self):
        return self.version

    # Entities are read-only: every child mutation is rejected.
    def appendChild(self, newChild):
        raise xml.dom.HierarchyRequestErr(
            "cannot append children to an entity node")

    def insertBefore(self, newChild, refChild):
        raise xml.dom.HierarchyRequestErr(
            "cannot insert children below an entity node")

    def removeChild(self, oldChild):
        raise xml.dom.HierarchyRequestErr(
            "cannot remove children from an entity node")

    def replaceChild(self, newChild, oldChild):
        raise xml.dom.HierarchyRequestErr(
            "cannot replace children of an entity node")
class Notation(Identified, Childless, Node):
    """A notation declared in the document's DTD."""
    nodeType = Node.NOTATION_NODE
    nodeValue = None

    def __init__(self, name, publicId, systemId):
        self.nodeName = name
        self._identified_mixin_init(publicId, systemId)
class DOMImplementation(DOMImplementationLS):
    """Factory for minidom documents and doctypes (DOM Core + LS)."""
    # (feature, version) pairs this implementation claims to support;
    # a version of None matches any requested version.
    _features = [("core", "1.0"),
                 ("core", "2.0"),
                 ("core", None),
                 ("xml", "1.0"),
                 ("xml", "2.0"),
                 ("xml", None),
                 ("ls-load", "3.0"),
                 ("ls-load", None),
                ]
    def hasFeature(self, feature, version):
        if version == "":
            version = None
        return (feature.lower(), version) in self._features
    def createDocument(self, namespaceURI, qualifiedName, doctype):
        """Create a Document, optionally with a root element and doctype."""
        if doctype and doctype.parentNode is not None:
            raise xml.dom.WrongDocumentErr(
                "doctype object owned by another DOM tree")
        doc = self._create_document()
        add_root_element = not (namespaceURI is None
                                and qualifiedName is None
                                and doctype is None)
        if not qualifiedName and add_root_element:
            # The spec is unclear what to raise here; SyntaxErr
            # would be the other obvious candidate. Since Xerces raises
            # InvalidCharacterErr, and since SyntaxErr is not listed
            # for createDocument, that seems to be the better choice.
            # XXX: need to check for illegal characters here and in
            # createElement.
            # DOM Level III clears this up when talking about the return value
            # of this function.  If namespaceURI, qName and DocType are
            # Null the document is returned without a document element
            # Otherwise if doctype or namespaceURI are not None
            # Then we go back to the above problem
            raise xml.dom.InvalidCharacterErr("Element with no name")
        if add_root_element:
            prefix, localname = _nssplit(qualifiedName)
            if prefix == "xml" \
               and namespaceURI != "http://www.w3.org/XML/1998/namespace":
                raise xml.dom.NamespaceErr("illegal use of 'xml' prefix")
            if prefix and not namespaceURI:
                raise xml.dom.NamespaceErr(
                    "illegal use of prefix without namespaces")
            element = doc.createElementNS(namespaceURI, qualifiedName)
            if doctype:
                doc.appendChild(doctype)
            doc.appendChild(element)
        if doctype:
            # Adopt the doctype into the new document.
            doctype.parentNode = doctype.ownerDocument = doc
        doc.doctype = doctype
        doc.implementation = self
        return doc
    def createDocumentType(self, qualifiedName, publicId, systemId):
        doctype = DocumentType(qualifiedName)
        doctype.publicId = publicId
        doctype.systemId = systemId
        return doctype
    # DOM Level 3 (WD 9 April 2002)
    def getInterface(self, feature):
        if self.hasFeature(feature, None):
            return self
        else:
            return None
    # internal
    def _create_document(self):
        return Document()
class ElementInfo(object):
    """Object that represents content-model information for an element.

    This implementation is not expected to be used in practice; DOM
    builders should provide implementations which do the right thing
    using information available to it.
    """
    __slots__ = 'tagName',

    def __init__(self, name):
        self.tagName = name

    # With no DTD available there is no type info for any attribute.
    def getAttributeType(self, aname):
        return _no_type

    def getAttributeTypeNS(self, namespaceURI, localName):
        return _no_type

    def isElementContent(self):
        return False

    def isEmpty(self):
        """Returns true iff this element is declared to have an EMPTY
        content model."""
        return False

    def isId(self, aname):
        """Returns true iff the named attribute is a DTD-style ID."""
        return False

    def isIdNS(self, namespaceURI, localName):
        """Returns true iff the identified attribute is a DTD-style ID."""
        return False

    # Pickle support: the tag name is the only state carried.
    def __getstate__(self):
        return self.tagName

    def __setstate__(self, state):
        self.tagName = state
def _clear_id_cache(node):
    """Invalidate the getElementById() cache of *node*'s document.

    Does nothing when *node* is detached from any document.
    """
    if node.nodeType == Node.DOCUMENT_NODE:
        doc = node
    elif _in_document(node):
        doc = node.ownerDocument
    else:
        return
    doc._id_cache.clear()
    doc._id_search_stack = None
class Document(Node, DocumentLS):
    """A DOM document node: the root of a minidom tree and the factory
    for all other node types.
    """
    __slots__ = ('_elem_info', 'doctype',
                 '_id_search_stack', 'childNodes', '_id_cache')
    _child_node_types = (Node.ELEMENT_NODE, Node.PROCESSING_INSTRUCTION_NODE,
                         Node.COMMENT_NODE, Node.DOCUMENT_TYPE_NODE)
    implementation = DOMImplementation()
    nodeType = Node.DOCUMENT_NODE
    nodeName = "#document"
    nodeValue = None
    attributes = None
    parentNode = None
    previousSibling = nextSibling = None
    # Document attributes from Level 3 (WD 9 April 2002)
    actualEncoding = None
    encoding = None
    standalone = None
    version = None
    strictErrorChecking = False
    errorHandler = None
    documentURI = None
    # Number of attributes document-wide made into IDs by
    # Element.setIdAttribute*() rather than by DTD information.
    _magic_id_count = 0
    def __init__(self):
        self.doctype = None
        self.childNodes = NodeList()
        # mapping of (namespaceURI, localName) -> ElementInfo
        #        and tagName -> ElementInfo
        self._elem_info = {}
        self._id_cache = {}
        self._id_search_stack = None
    def _get_elem_info(self, element):
        # Look up content-model info by (URI, localName) for namespaced
        # elements, else by plain tag name.
        if element.namespaceURI:
            key = element.namespaceURI, element.localName
        else:
            key = element.tagName
        return self._elem_info.get(key)
    def _get_actualEncoding(self):
        return self.actualEncoding
    def _get_doctype(self):
        return self.doctype
    def _get_documentURI(self):
        return self.documentURI
    def _get_encoding(self):
        return self.encoding
    def _get_errorHandler(self):
        return self.errorHandler
    def _get_standalone(self):
        return self.standalone
    def _get_strictErrorChecking(self):
        return self.strictErrorChecking
    def _get_version(self):
        return self.version
    def appendChild(self, node):
        """Append a child, enforcing the single-document-element rule."""
        if node.nodeType not in self._child_node_types:
            raise xml.dom.HierarchyRequestErr(
                "%s cannot be child of %s" % (repr(node), repr(self)))
        if node.parentNode is not None:
            # This needs to be done before the next test since this
            # may *be* the document element, in which case it should
            # end up re-ordered to the end.
            node.parentNode.removeChild(node)
        if node.nodeType == Node.ELEMENT_NODE \
           and self._get_documentElement():
            raise xml.dom.HierarchyRequestErr(
                "two document elements disallowed")
        return Node.appendChild(self, node)
    def removeChild(self, oldChild):
        try:
            self.childNodes.remove(oldChild)
        except ValueError:
            raise xml.dom.NotFoundErr()
        oldChild.nextSibling = oldChild.previousSibling = None
        oldChild.parentNode = None
        if self.documentElement is oldChild:
            # NOTE(review): ``documentElement`` is a read-only defproperty
            # and not in __slots__, so this assignment looks like it would
            # raise AttributeError when removing the root element — confirm.
            self.documentElement = None
        return oldChild
    def _get_documentElement(self):
        # The document element is the first (and only) element child.
        for node in self.childNodes:
            if node.nodeType == Node.ELEMENT_NODE:
                return node
    def unlink(self):
        if self.doctype is not None:
            self.doctype.unlink()
            self.doctype = None
        Node.unlink(self)
    def cloneNode(self, deep):
        """Deep-clone this document; shallow document clones are None."""
        if not deep:
            return None
        clone = self.implementation.createDocument(None, None, None)
        clone.encoding = self.encoding
        clone.standalone = self.standalone
        clone.version = self.version
        for n in self.childNodes:
            childclone = _clone_node(n, deep, clone)
            assert childclone.ownerDocument.isSameNode(clone)
            clone.childNodes.append(childclone)
            if childclone.nodeType == Node.DOCUMENT_NODE:
                assert clone.documentElement is None
            elif childclone.nodeType == Node.DOCUMENT_TYPE_NODE:
                assert clone.doctype is None
                clone.doctype = childclone
            childclone.parentNode = clone
        self._call_user_data_handler(xml.dom.UserDataHandler.NODE_CLONED,
                                     self, clone)
        return clone
    def createDocumentFragment(self):
        d = DocumentFragment()
        d.ownerDocument = self
        return d
    def createElement(self, tagName):
        e = Element(tagName)
        e.ownerDocument = self
        return e
    def createTextNode(self, data):
        if not isinstance(data, str):
            raise TypeError("node contents must be a string")
        t = Text()
        t.data = data
        t.ownerDocument = self
        return t
    def createCDATASection(self, data):
        if not isinstance(data, str):
            raise TypeError("node contents must be a string")
        c = CDATASection()
        c.data = data
        c.ownerDocument = self
        return c
    def createComment(self, data):
        c = Comment(data)
        c.ownerDocument = self
        return c
    def createProcessingInstruction(self, target, data):
        p = ProcessingInstruction(target, data)
        p.ownerDocument = self
        return p
    def createAttribute(self, qName):
        a = Attr(qName)
        a.ownerDocument = self
        a.value = ""
        return a
    def createElementNS(self, namespaceURI, qualifiedName):
        prefix, localName = _nssplit(qualifiedName)
        e = Element(qualifiedName, namespaceURI, prefix)
        e.ownerDocument = self
        return e
    def createAttributeNS(self, namespaceURI, qualifiedName):
        prefix, localName = _nssplit(qualifiedName)
        a = Attr(qualifiedName, namespaceURI, localName, prefix)
        a.ownerDocument = self
        a.value = ""
        return a
    # A couple of implementation-specific helpers to create node types
    # not supported by the W3C DOM specs:
    def _create_entity(self, name, publicId, systemId, notationName):
        e = Entity(name, publicId, systemId, notationName)
        e.ownerDocument = self
        return e
    def _create_notation(self, name, publicId, systemId):
        n = Notation(name, publicId, systemId)
        n.ownerDocument = self
        return n
    def getElementById(self, id):
        """Return the element whose ID-typed attribute equals *id*.

        Uses an incremental depth-first search whose frontier
        (_id_search_stack) and results (_id_cache) persist between
        calls until invalidated by _clear_id_cache().
        """
        if id in self._id_cache:
            return self._id_cache[id]
        if not (self._elem_info or self._magic_id_count):
            # No DTD info and no setIdAttribute*() IDs: nothing can match.
            return None
        stack = self._id_search_stack
        if stack is None:
            # we never searched before, or the cache has been cleared
            stack = [self.documentElement]
            self._id_search_stack = stack
        elif not stack:
            # Previous search was completed and cache is still valid;
            # no matching node.
            return None
        result = None
        while stack:
            node = stack.pop()
            # add child elements to stack for continued searching
            stack.extend([child for child in node.childNodes
                          if child.nodeType in _nodeTypes_with_children])
            # check this node
            info = self._get_elem_info(node)
            if info:
                # We have to process all ID attributes before
                # returning in order to get all the attributes set to
                # be IDs using Element.setIdAttribute*().
                for attr in node.attributes.values():
                    if attr.namespaceURI:
                        if info.isIdNS(attr.namespaceURI, attr.localName):
                            self._id_cache[attr.value] = node
                            if attr.value == id:
                                result = node
                            elif not node._magic_id_nodes:
                                break
                    elif info.isId(attr.name):
                        self._id_cache[attr.value] = node
                        if attr.value == id:
                            result = node
                        elif not node._magic_id_nodes:
                            break
                    elif attr._is_id:
                        self._id_cache[attr.value] = node
                        if attr.value == id:
                            result = node
                        elif node._magic_id_nodes == 1:
                            break
            elif node._magic_id_nodes:
                # No DTD info for this element, but some attributes were
                # explicitly marked as IDs.
                for attr in node.attributes.values():
                    if attr._is_id:
                        self._id_cache[attr.value] = node
                        if attr.value == id:
                            result = node
            if result is not None:
                break
        return result
    def getElementsByTagName(self, name):
        return _get_elements_by_tagName_helper(self, name, NodeList())
    def getElementsByTagNameNS(self, namespaceURI, localName):
        return _get_elements_by_tagName_ns_helper(
            self, namespaceURI, localName, NodeList())
    def isSupported(self, feature, version):
        return self.implementation.hasFeature(feature, version)
    def importNode(self, node, deep):
        """Copy a node from another document into this one."""
        if node.nodeType == Node.DOCUMENT_NODE:
            raise xml.dom.NotSupportedErr("cannot import document nodes")
        elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
            raise xml.dom.NotSupportedErr("cannot import document type nodes")
        return _clone_node(node, deep, self)
    def writexml(self, writer, indent="", addindent="", newl="", encoding=None):
        """Write the XML declaration and every child to *writer*."""
        if encoding is None:
            writer.write('<?xml version="1.0" ?>'+newl)
        else:
            writer.write('<?xml version="1.0" encoding="%s"?>%s' % (
                encoding, newl))
        for node in self.childNodes:
            node.writexml(writer, indent, addindent, newl)
    # DOM Level 3 (WD 9 April 2002)
    def renameNode(self, n, namespaceURI, name):
        """Rename an element or attribute in place, updating namespace
        bookkeeping; returns the (same) node."""
        if n.ownerDocument is not self:
            raise xml.dom.WrongDocumentErr(
                "cannot rename nodes from other documents;\n"
                "expected %s,\nfound %s" % (self, n.ownerDocument))
        if n.nodeType not in (Node.ELEMENT_NODE, Node.ATTRIBUTE_NODE):
            raise xml.dom.NotSupportedErr(
                "renameNode() only applies to element and attribute nodes")
        if namespaceURI != EMPTY_NAMESPACE:
            if ':' in name:
                prefix, localName = name.split(':', 1)
                if (  prefix == "xmlns"
                      and namespaceURI != xml.dom.XMLNS_NAMESPACE):
                    raise xml.dom.NamespaceErr(
                        "illegal use of 'xmlns' prefix")
            else:
                if (  name == "xmlns"
                      and namespaceURI != xml.dom.XMLNS_NAMESPACE
                      and n.nodeType == Node.ATTRIBUTE_NODE):
                    raise xml.dom.NamespaceErr(
                        "illegal use of the 'xmlns' attribute")
                prefix = None
                localName = name
        else:
            prefix = None
            localName = None
        if n.nodeType == Node.ATTRIBUTE_NODE:
            # Detach the attribute so the rename cannot corrupt the
            # owning element's indexes; it is re-attached below.
            element = n.ownerElement
            if element is not None:
                is_id = n._is_id
                element.removeAttributeNode(n)
        else:
            element = None
        n.prefix = prefix
        n._localName = localName
        n.namespaceURI = namespaceURI
        n.nodeName = name
        if n.nodeType == Node.ELEMENT_NODE:
            n.tagName = name
        else:
            # attribute node
            n.name = name
            if element is not None:
                element.setAttributeNode(n)
                if is_id:
                    element.setIdAttributeNode(n)
        # It's not clear from a semantic perspective whether we should
        # call the user data handlers for the NODE_RENAMED event since
        # we're re-using the existing node.  The draft spec has been
        # interpreted as meaning "no, don't call the handler unless a
        # new node is created."
        return n
# Read-only accessor for the document's single element child.
defproperty(Document, "documentElement",
            doc="Top-level element of this document.")
def _clone_node(node, deep, newOwnerDocument):
    """
    Clone a node and give it the new owner document.
    Called by Node.cloneNode and Document.importNode
    """
    # Per DOM Level 3, user-data handlers are told whether this is a
    # same-document clone or a cross-document import.
    if node.ownerDocument.isSameNode(newOwnerDocument):
        operation = xml.dom.UserDataHandler.NODE_CLONED
    else:
        operation = xml.dom.UserDataHandler.NODE_IMPORTED
    if node.nodeType == Node.ELEMENT_NODE:
        clone = newOwnerDocument.createElementNS(node.namespaceURI,
                                                 node.nodeName)
        for attr in node.attributes.values():
            clone.setAttributeNS(attr.namespaceURI, attr.nodeName, attr.value)
            a = clone.getAttributeNodeNS(attr.namespaceURI, attr.localName)
            a.specified = attr.specified
        if deep:
            for child in node.childNodes:
                c = _clone_node(child, deep, newOwnerDocument)
                clone.appendChild(c)
    elif node.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
        clone = newOwnerDocument.createDocumentFragment()
        if deep:
            for child in node.childNodes:
                c = _clone_node(child, deep, newOwnerDocument)
                clone.appendChild(c)
    elif node.nodeType == Node.TEXT_NODE:
        clone = newOwnerDocument.createTextNode(node.data)
    elif node.nodeType == Node.CDATA_SECTION_NODE:
        clone = newOwnerDocument.createCDATASection(node.data)
    elif node.nodeType == Node.PROCESSING_INSTRUCTION_NODE:
        clone = newOwnerDocument.createProcessingInstruction(node.target,
                                                             node.data)
    elif node.nodeType == Node.COMMENT_NODE:
        clone = newOwnerDocument.createComment(node.data)
    elif node.nodeType == Node.ATTRIBUTE_NODE:
        clone = newOwnerDocument.createAttributeNS(node.namespaceURI,
                                                   node.nodeName)
        clone.specified = True
        clone.value = node.value
    elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
        assert node.ownerDocument is not newOwnerDocument
        operation = xml.dom.UserDataHandler.NODE_IMPORTED
        clone = newOwnerDocument.implementation.createDocumentType(
            node.name, node.publicId, node.systemId)
        clone.ownerDocument = newOwnerDocument
        if deep:
            clone.entities._seq = []
            clone.notations._seq = []
            for n in node.notations._seq:
                notation = Notation(n.nodeName, n.publicId, n.systemId)
                notation.ownerDocument = newOwnerDocument
                clone.notations._seq.append(notation)
                if hasattr(n, '_call_user_data_handler'):
                    n._call_user_data_handler(operation, n, notation)
            for e in node.entities._seq:
                entity = Entity(e.nodeName, e.publicId, e.systemId,
                                e.notationName)
                entity.actualEncoding = e.actualEncoding
                entity.encoding = e.encoding
                entity.version = e.version
                entity.ownerDocument = newOwnerDocument
                clone.entities._seq.append(entity)
                if hasattr(e, '_call_user_data_handler'):
                    # Bug fix: this previously passed ``n`` (the last
                    # notation from the loop above, unbound when there
                    # were no notations) as the src argument; the source
                    # of the clone is the entity ``e`` itself.
                    e._call_user_data_handler(operation, e, entity)
    else:
        # Note the cloning of Document and DocumentType nodes is
        # implementation specific.  minidom handles those cases
        # directly in the cloneNode() methods.
        raise xml.dom.NotSupportedErr("Cannot clone node %s" % repr(node))
    # Check for _call_user_data_handler() since this could conceivably
    # used with other DOM implementations (one of the FourThought
    # DOMs, perhaps?).
    if hasattr(node, '_call_user_data_handler'):
        node._call_user_data_handler(operation, node, clone)
    return clone
def _nssplit(qualifiedName):
    """Split a qualified name at the first colon.

    Returns a two-item sequence ``(prefix, localName)``; the prefix is
    None when the name contains no colon.
    """
    prefix, sep, local = qualifiedName.partition(':')
    if sep:
        return [prefix, local]
    return (None, qualifiedName)
def _do_pulldom_parse(func, args, kwargs):
    """Run a pulldom parse function and return the fully-expanded root."""
    events = func(*args, **kwargs)
    _, rootNode = events.getEvent()
    events.expandNode(rootNode)
    events.clear()
    return rootNode
def parse(file, parser=None, bufsize=None):
    """Parse a file into a DOM by filename or file object.

    With the default arguments the fast expat builder is used; passing a
    custom parser or a bufsize routes the work through xml.dom.pulldom.
    """
    use_expat = parser is None and not bufsize
    if not use_expat:
        from xml.dom import pulldom
        kwargs = {'parser': parser, 'bufsize': bufsize}
        return _do_pulldom_parse(pulldom.parse, (file,), kwargs)
    from xml.dom import expatbuilder
    return expatbuilder.parse(file)
def parseString(string, parser=None):
    """Parse a file into a DOM from a string.

    A custom *parser* forces the slower pulldom code path; otherwise the
    expat builder handles the string directly.
    """
    if parser is not None:
        from xml.dom import pulldom
        return _do_pulldom_parse(pulldom.parseString, (string,),
                                 {'parser': parser})
    from xml.dom import expatbuilder
    return expatbuilder.parseString(string)
def getDOMImplementation(features=None):
    """Return the minidom DOMImplementation, or None.

    *features* may be a DOM feature string or a sequence of
    (feature, version) pairs; None is returned if any requested feature
    is not supported by this implementation.
    """
    if features:
        if isinstance(features, str):
            features = domreg._parse_feature_string(features)
        for f, v in features:
            if not Document.implementation.hasFeature(f, v):
                return None
    return Document.implementation
|
mkoistinen/aldryn-faq | refs/heads/master | aldryn_faq/forms.py | 1 | from django import forms
from hvad.forms import TranslatableModelForm
from sortedm2m.forms import SortedMultipleChoiceField
from .models import Category, QuestionListPlugin, Question
class CategoryAdminForm(TranslatableModelForm):
    """Admin form for Category enforcing slug uniqueness across all
    category translations."""

    class Meta:
        # NOTE(review): neither ``fields`` nor ``exclude`` is declared --
        # newer Django versions require one of them on ModelForms; confirm
        # against the Django version in use.
        model = Category

    def clean_slug(self):
        """Reject a slug already used by another category's translation."""
        slug = self.cleaned_data['slug']
        translations_model = Category._meta.translations_model
        categories_with_slug = translations_model.objects.filter(slug=slug)
        if self.instance.pk:
            # Make sure to exclude references from this master :)
            categories_with_slug = categories_with_slug.exclude(master_id=self.instance.pk)
        if categories_with_slug.exists():
            raise forms.ValidationError('A category with this slug already exists.')
        return slug
class QuestionListPluginForm(forms.ModelForm):
    """Plugin form letting an editor pick an ordered set of questions."""

    # Placeholder queryset; the real language-aware queryset is installed
    # in __init__ because it must be evaluated per request.
    questions = SortedMultipleChoiceField(queryset=Question.objects.none())

    class Meta:
        # NOTE(review): no ``fields``/``exclude`` declared -- required by
        # newer Django versions; confirm.
        model = QuestionListPlugin

    def __init__(self, *args, **kwargs):
        super(QuestionListPluginForm, self).__init__(*args, **kwargs)
        # Restrict choices to questions in the currently active language.
        questions_field = self.fields['questions']
        questions_field.queryset = Question.objects.language()
|
wwj718/edx-platform | refs/heads/master | common/djangoapps/embargo/messages.py | 148 | """Define messages for restricted courses.
These messages are displayed to users when they are blocked
from either enrolling in or accessing a course.
"""
from collections import namedtuple
# Immutable pairing of a human-readable label with the template used to
# render the block page.
BlockedMessage = namedtuple('BlockedMessage', [
    # A user-facing description of the message
    'description',

    # The mako template used to render the message
    'template',
])

# Messages shown when a user is blocked from enrolling, keyed by message
# type.
ENROLL_MESSAGES = {
    'default': BlockedMessage(
        description='Default',
        template='embargo/default_enrollment.html'
    ),
    'embargo': BlockedMessage(
        description='Embargo',
        template='static_templates/embargo.html'
    )
}

# Messages shown when a user is blocked from accessing courseware, keyed
# by message type.
COURSEWARE_MESSAGES = {
    'default': BlockedMessage(
        description='Default',
        template='embargo/default_courseware.html'
    ),
    'embargo': BlockedMessage(
        description='Embargo',
        template='static_templates/embargo.html'
    )
}

# Backwards compatibility with themes
# created for earlier implementations of the embargo app.
CUSTOM_THEME_OVERRIDES = {
    'embargo': BlockedMessage(
        description='Embargo',
        template='static_templates/theme-embargo.html'
    )
}
|
termie/pupa | refs/heads/master | nova/rpc.py | 1 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""AMQP-based RPC.
Queues have consumers and publishers.
No fan-out support yet.
"""
import json
import sys
import time
import traceback
import uuid
from carrot import connection as carrot_connection
from carrot import messaging
from eventlet import greenpool
from eventlet import greenthread
from nova import context
from nova import exception
from nova import fakerabbit
from nova import flags
from nova import log as logging
from nova import utils
LOG = logging.getLogger('nova.rpc')

FLAGS = flags.FLAGS
# Runtime-configurable knobs for the AMQP (RabbitMQ) connection and the
# RPC worker green-thread pool.
flags.DEFINE_integer('rpc_thread_pool_size', 1024, 'Size of RPC thread pool')
flags.DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit')
flags.DEFINE_string('rabbit_host', 'localhost', 'rabbit host')
flags.DEFINE_integer('rabbit_port', 5672, 'rabbit port')
flags.DEFINE_string('rabbit_userid', 'guest', 'rabbit userid')
flags.DEFINE_string('rabbit_password', 'guest', 'rabbit password')
flags.DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')
flags.DEFINE_integer('rabbit_retry_interval', 10,
                     'rabbit connection retry interval')
flags.DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts')
flags.DEFINE_string('control_exchange', 'nova',
                    'the main exchange to connect to')
class Connection(carrot_connection.BrokerConnection):
    """Connection instance object.

    Thin wrapper over carrot's BrokerConnection configured from FLAGS,
    with an optional cached singleton for testing.
    """

    @classmethod
    def instance(cls, new=True):
        """Returns the instance.

        With new=True (the default) a fresh connection is built on every
        call; with new=False a single cached connection is lazily created
        and reused.
        """
        if new or not hasattr(cls, '_instance'):
            params = {}
            if FLAGS.fake_rabbit:
                # Test mode: swap in the in-process fake AMQP backend.
                params['backend_cls'] = fakerabbit.Backend
            else:
                params.update(dict(hostname=FLAGS.rabbit_host,
                                  port=FLAGS.rabbit_port,
                                  userid=FLAGS.rabbit_userid,
                                  password=FLAGS.rabbit_password,
                                  virtual_host=FLAGS.rabbit_virtual_host))

            # NOTE(vish): magic is fun!
            # pylint: disable=W0142
            if new:
                return cls(**params)
            else:
                cls._instance = cls(**params)
        return cls._instance

    @classmethod
    def recreate(cls):
        """Recreates the connection instance.

        This is necessary to recover from some network errors/disconnects.
        """
        try:
            del cls._instance
        except AttributeError, e:
            # The _instance stuff is for testing purposes. Usually we don't use
            # it. So don't freak out if it doesn't exist.
            pass
        return cls.instance()
class Consumer(messaging.Consumer):
    """Consumer base class.

    Contains methods for connecting the fetch method to async loops, plus
    retry logic for establishing the initial AMQP connection.
    """

    def __init__(self, *args, **kwargs):
        # Retry the initial connection up to rabbit_max_retries times,
        # sleeping rabbit_retry_interval seconds between attempts; give up
        # and exit the process if the broker never becomes reachable.
        for i in xrange(FLAGS.rabbit_max_retries):
            if i > 0:
                time.sleep(FLAGS.rabbit_retry_interval)
            try:
                super(Consumer, self).__init__(*args, **kwargs)
                self.failed_connection = False
                break
            except Exception as e:  # Catching all because carrot sucks
                fl_host = FLAGS.rabbit_host
                fl_port = FLAGS.rabbit_port
                fl_intv = FLAGS.rabbit_retry_interval
                LOG.error(_('AMQP server on %(fl_host)s:%(fl_port)d is'
                            ' unreachable: %(e)s. Trying again in %(fl_intv)d'
                            ' seconds.') % locals())
                self.failed_connection = True
        if self.failed_connection:
            LOG.error(_('Unable to connect to AMQP server '
                        'after %d tries. Shutting down.'),
                      FLAGS.rabbit_max_retries)
            sys.exit(1)

    def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
        """Wraps the parent fetch with some logic for failed connection."""
        # TODO(vish): the logic for failed connections and logging should be
        #             refactored into some sort of connection manager object
        try:
            if self.failed_connection:
                # NOTE(vish): connection is defined in the parent class, we can
                #             recreate it as long as we create the backend too
                # pylint: disable=W0201
                self.connection = Connection.recreate()
                self.backend = self.connection.create_backend()
                self.declare()
            super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks)
            if self.failed_connection:
                LOG.error(_('Reconnected to queue'))
                self.failed_connection = False
        # NOTE(vish): This is catching all errors because we really don't
        #             want exceptions to be logged 10 times a second if some
        #             persistent failure occurs.
        except Exception, e:  # pylint: disable=W0703
            if not self.failed_connection:
                LOG.exception(_('Failed to fetch message from queue: %s' % e))
                self.failed_connection = True

    def attach_to_eventlet(self):
        """Only needed for unit tests!"""
        # Poll fetch() on a 0.1s loop so callbacks fire without a real
        # event loop driving the consumer.
        timer = utils.LoopingCall(self.fetch, enable_callbacks=True)
        timer.start(0.1)
        return timer
class AdapterConsumer(Consumer):
    """Calls methods on a proxy object based on method and args.

    Each incoming message names a method on ``proxy`` plus keyword
    arguments; handlers run on a green-thread pool so slow handlers do
    not block message consumption.
    """

    def __init__(self, connection=None, topic='broadcast', proxy=None):
        LOG.debug(_('Initing the Adapter Consumer for %s') % topic)
        self.proxy = proxy
        self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size)
        super(AdapterConsumer, self).__init__(connection=connection,
                                              topic=topic)

    def receive(self, *args, **kwargs):
        # Dispatch each message onto the pool; _receive does the work.
        self.pool.spawn_n(self._receive, *args, **kwargs)

    @exception.wrap_exception
    def _receive(self, message_data, message):
        """Magically looks for a method on the proxy object and calls it.

        Message data should be a dictionary with two keys:
            method: string representing the method to call
            args: dictionary of arg: value

        Example: {'method': 'echo', 'args': {'value': 42}}
        """
        LOG.debug(_('received %s') % message_data)
        # _msg_id is only present for call() (two-way) invocations; casts
        # carry no reply channel.
        msg_id = message_data.pop('_msg_id', None)

        ctxt = _unpack_context(message_data)
        method = message_data.get('method')
        args = message_data.get('args', {})
        message.ack()
        if not method:
            # NOTE(vish): we may not want to ack here, but that means that bad
            #             messages stay in the queue indefinitely, so for now
            #             we just log the message and send an error string
            #             back to the caller
            LOG.warn(_('no method for message: %s') % message_data)
            msg_reply(msg_id, _('No method for message: %s') % message_data)
            return

        node_func = getattr(self.proxy, str(method))
        node_args = dict((str(k), v) for k, v in args.iteritems())
        # NOTE(vish): magic is fun!
        try:
            rval = node_func(context=ctxt, **node_args)
            if msg_id:
                msg_reply(msg_id, rval, None)
        except Exception as e:
            logging.exception('Exception during message handling')
            if msg_id:
                # Ship the failure back to the caller as a RemoteError.
                msg_reply(msg_id, None, sys.exc_info())
        return
return
class Publisher(messaging.Publisher):
    """Publisher base class.

    Thin alias over carrot's Publisher; concrete subclasses configure the
    exchange type, routing key and durability.
    """
    pass
class TopicAdapterConsumer(AdapterConsumer):
    """Consumes messages on a specific topic."""

    exchange_type = 'topic'

    def __init__(self, connection=None, topic='broadcast', proxy=None):
        # Queue and routing key share the topic name on the main control
        # exchange; queues are non-durable.
        self.queue = topic
        self.routing_key = topic
        self.exchange = FLAGS.control_exchange
        self.durable = False
        super(TopicAdapterConsumer, self).__init__(connection=connection,
                                                   topic=topic, proxy=proxy)
class FanoutAdapterConsumer(AdapterConsumer):
    """Consumes messages from a fanout exchange."""

    exchange_type = 'fanout'

    def __init__(self, connection=None, topic='broadcast', proxy=None):
        self.exchange = '%s_fanout' % topic
        self.routing_key = topic
        # Each consumer binds its own uniquely named queue so that every
        # consumer attached to the fanout exchange receives a copy.
        unique = uuid.uuid4().hex
        self.queue = '%s_fanout_%s' % (topic, unique)
        self.durable = False
        LOG.info(_('Created "%(exchange)s" fanout exchange '
                   'with "%(key)s" routing key'),
                 dict(exchange=self.exchange, key=self.routing_key))
        super(FanoutAdapterConsumer, self).__init__(connection=connection,
                                                    topic=topic, proxy=proxy)
class TopicPublisher(Publisher):
    """Publisher that routes messages through the control exchange using
    the topic name as the routing key."""

    exchange_type = 'topic'

    def __init__(self, connection=None, topic='broadcast'):
        # Attributes are set before delegating to the base __init__, which
        # presumably consumes them (matches the other publishers here).
        self.exchange = FLAGS.control_exchange
        self.durable = False
        self.routing_key = topic
        super(TopicPublisher, self).__init__(connection=connection)
class FanoutPublisher(Publisher):
    """Publishes messages to a fanout exchange."""

    exchange_type = 'fanout'

    def __init__(self, topic, connection=None):
        self.exchange = '%s_fanout' % topic
        # NOTE(review): publishers normally don't need a queue; this mirrors
        # the consumer naming but may be unused -- confirm against carrot.
        self.queue = '%s_fanout' % topic
        self.durable = False
        LOG.info(_('Creating "%(exchange)s" fanout exchange'),
                 dict(exchange=self.exchange))
        super(FanoutPublisher, self).__init__(connection=connection)
class DirectConsumer(Consumer):
    """Consumes messages directly on a channel specified by msg_id."""

    exchange_type = 'direct'

    def __init__(self, connection=None, msg_id=None):
        # Exchange, queue and routing key all reuse the per-call msg_id;
        # the queue is exclusive to this consumer and auto-deleted when
        # the reply has been consumed.
        self.queue = msg_id
        self.routing_key = msg_id
        self.exchange = msg_id
        self.auto_delete = True
        self.exclusive = True
        super(DirectConsumer, self).__init__(connection=connection)
class DirectPublisher(Publisher):
    """Publisher for one-shot replies on a per-call direct exchange.

    The msg_id doubles as both exchange name and routing key; the
    exchange is auto-deleted once unused.
    """

    exchange_type = 'direct'

    def __init__(self, connection=None, msg_id=None):
        self.auto_delete = True
        self.exchange = msg_id
        self.routing_key = msg_id
        super(DirectPublisher, self).__init__(connection=connection)
def msg_reply(msg_id, reply=None, failure=None):
    """Sends a reply or an error on the channel signified by msg_id.

    Failure should be a sys.exc_info() tuple.
    """
    if failure:
        message = str(failure[1])
        tb = traceback.format_exception(*failure)
        LOG.error(_("Returning exception %s to caller"), message)
        LOG.error(tb)
        # Reduce to serializable data: (type name, value, traceback lines).
        failure = (failure[0].__name__, str(failure[1]), tb)
    conn = Connection.instance()
    publisher = DirectPublisher(connection=conn, msg_id=msg_id)
    try:
        publisher.send({'result': reply, 'failure': failure})
    except TypeError:
        # reply wasn't serializable; fall back to repr()ing each attribute
        # (assumes reply has a __dict__ -- TODO confirm for all callers).
        publisher.send(
                {'result': dict((k, repr(v))
                                for k, v in reply.__dict__.iteritems()),
                 'failure': failure})
    publisher.close()
class RemoteError(exception.Error):
    """Signifies that a remote class has raised an exception.

    Carries a string representation of the type of the original
    exception, the original value, and the traceback.  These are joined
    into the error message so that printing the exception shows all of
    the relevant info.
    """

    def __init__(self, exc_type, value, traceback):
        self.exc_type = exc_type
        self.value = value
        self.traceback = traceback
        joined = '%s %s\n%s' % (exc_type, value, traceback)
        super(RemoteError, self).__init__(joined)
def _unpack_context(msg):
    """Pop every '_context_*' key out of *msg* and rebuild a RequestContext."""
    prefix = '_context_'
    context_dict = {}
    for raw_key in list(msg.keys()):
        # NOTE(vish): Some versions of python don't like unicode keys
        #             in kwargs, hence the str().
        key = str(raw_key)
        if not key.startswith(prefix):
            continue
        context_dict[key[len(prefix):]] = msg.pop(key)
    LOG.debug(_('unpacked context: %s'), context_dict)
    return context.RequestContext.from_dict(context_dict)
def _pack_context(msg, context):
    """Pack context into msg.

    Values for message keys need to be less than 255 chars, so we pull
    context out into a bunch of separate keys. If we want to support
    more arguments in rabbit messages, we may want to do the same
    for args at some point.
    """
    # Prefix every context field with '_context_' so _unpack_context can
    # recover it on the receiving side.
    context = dict([('_context_%s' % key, value)
                    for (key, value) in context.to_dict().iteritems()])
    msg.update(context)
def call(context, topic, msg):
    """Sends a message on a topic and wait for a response.

    Publishes on *topic* with a fresh _msg_id, then blocks consuming the
    per-call direct exchange named after that id.  A remote failure is
    re-raised locally as RemoteError.
    """
    LOG.debug(_('Making asynchronous call on %s ...'), topic)
    msg_id = uuid.uuid4().hex
    msg.update({'_msg_id': msg_id})
    LOG.debug(_('MSG_ID is %s') % (msg_id))
    _pack_context(msg, context)

    class WaitMessage(object):
        """Callback object capturing the single expected reply."""

        def __call__(self, data, message):
            """Acks message and sets result."""
            message.ack()
            if data['failure']:
                self.result = RemoteError(*data['failure'])
            else:
                self.result = data['result']

    wait_msg = WaitMessage()
    # The reply consumer must be registered before the request is sent so
    # the response cannot be missed.
    conn = Connection.instance()
    consumer = DirectConsumer(connection=conn, msg_id=msg_id)
    consumer.register_callback(wait_msg)

    conn = Connection.instance()
    publisher = TopicPublisher(connection=conn, topic=topic)
    publisher.send(msg)
    publisher.close()

    try:
        # Wait for exactly one reply; carrot signals the limit via
        # StopIteration.
        consumer.wait(limit=1)
    except StopIteration:
        pass
    consumer.close()
    # NOTE(termie): this is a little bit of a change from the original
    #               non-eventlet code where returning a Failure
    #               instance from a deferred call is very similar to
    #               raising an exception
    if isinstance(wait_msg.result, Exception):
        raise wait_msg.result

    return wait_msg.result
def cast(context, topic, msg):
    """Send *msg* on *topic* without waiting for a response (fire-and-forget)."""
    LOG.debug(_('Making asynchronous cast on %s...'), topic)
    _pack_context(msg, context)
    publisher = TopicPublisher(connection=Connection.instance(), topic=topic)
    publisher.send(msg)
    publisher.close()
def fanout_cast(context, topic, msg):
    """Sends a message on a fanout exchange without waiting for a response.

    Every consumer bound to the '<topic>_fanout' exchange receives a copy.
    """
    LOG.debug(_('Making asynchronous fanout cast...'))
    _pack_context(msg, context)
    conn = Connection.instance()
    publisher = FanoutPublisher(topic, connection=conn)
    publisher.send(msg)
    publisher.close()
def generic_response(message_data, message):
    """Logs a result and exits.

    Used as the reply callback by send_message() below; terminates the
    process after the first response arrives.
    """
    LOG.debug(_('response %s'), message_data)
    message.ack()
    sys.exit(0)
def send_message(topic, message, wait=True):
    """Sends a message for testing.

    Publishes *message* on the control exchange; with wait=True a direct
    reply queue keyed by a fresh msg_id is consumed, and the
    generic_response callback exits the process once the reply arrives.
    """
    msg_id = uuid.uuid4().hex
    message.update({'_msg_id': msg_id})
    LOG.debug(_('topic is %s'), topic)
    LOG.debug(_('message %s'), message)

    if wait:
        # Register the reply consumer before publishing the request.
        consumer = messaging.Consumer(connection=Connection.instance(),
                                      queue=msg_id,
                                      exchange=msg_id,
                                      auto_delete=True,
                                      exchange_type='direct',
                                      routing_key=msg_id)
        consumer.register_callback(generic_response)

    publisher = messaging.Publisher(connection=Connection.instance(),
                                    exchange=FLAGS.control_exchange,
                                    durable=False,
                                    exchange_type='topic',
                                    routing_key=topic)
    publisher.send(message)
    publisher.close()

    if wait:
        consumer.wait()
if __name__ == '__main__':
    # You can send messages from the command line using
    # a topic and a json string representing a dictionary
    # for the method, e.g.:
    #   python rpc.py compute '{"method": "echo", "args": {"value": 42}}'
    send_message(sys.argv[1], json.loads(sys.argv[2]))
|
google/neural-light-transport | refs/heads/master | third_party/robust_loss/fit_partition_spline_test.py | 1 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for fit_partition_spline.py."""
import tensorflow.compat.v2 as tf
from robust_loss import distribution
from robust_loss import fit_partition_spline
tf.enable_v2_behavior()
class FitPartitionSplineTest(tf.test.TestCase):
    """Tests for fit_partition_spline's numerical partition function."""

    def testNumericalPartitionIsAccurate(self):
        """Test _numerical_base_partition_function against some golden data."""
        # alpha values are rational so the analytical closed form applies.
        for (numer, denom) in [(0, 1), (1, 8), (1, 2), (1, 1), (2, 1), (8, 1)]:
            alpha = tf.cast(numer, tf.float64) / tf.cast(denom, tf.float64)
            z_true = distribution.analytical_base_partition_function(numer, denom)
            z = fit_partition_spline.numerical_base_partition_function(alpha)
            self.assertAllClose(z, z_true, atol=1e-10, rtol=1e-10)
if __name__ == '__main__':
    # Standard TensorFlow test entry point.
    tf.test.main()
|
RafaelTorrealba/odoo | refs/heads/8.0 | addons/account/report/common_report_header.py | 386 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tools.translate import _
# Mixin to use with rml_parse, so self.pool will be defined.
class common_report_header(object):
    """Shared helpers for OpenERP account report parsers.

    Mixin for rml_parse-based reports; assumes the host class provides
    ``self.cr``, ``self.uid``, ``self.pool``, ``self.journal_ids``,
    ``self.period_ids`` and ``self.query_get_clause``.
    """

    def _sum_debit(self, period_id=False, journal_id=False):
        """Total debit over the given (or preselected) periods/journals."""
        if journal_id and isinstance(journal_id, int):
            journal_id = [journal_id]
        if period_id and isinstance(period_id, int):
            period_id = [period_id]
        if not journal_id:
            journal_id = self.journal_ids
        if not period_id:
            period_id = self.period_ids
        if not (period_id and journal_id):
            return 0.0
        self.cr.execute('SELECT SUM(debit) FROM account_move_line l '
                        'WHERE period_id IN %s AND journal_id IN %s ' + self.query_get_clause + ' ',
                        (tuple(period_id), tuple(journal_id)))
        return self.cr.fetchone()[0] or 0.0

    def _sum_credit(self, period_id=False, journal_id=False):
        """Total credit over the given (or preselected) periods/journals."""
        if journal_id and isinstance(journal_id, int):
            journal_id = [journal_id]
        if period_id and isinstance(period_id, int):
            period_id = [period_id]
        if not journal_id:
            journal_id = self.journal_ids
        if not period_id:
            period_id = self.period_ids
        if not (period_id and journal_id):
            return 0.0
        self.cr.execute('SELECT SUM(credit) FROM account_move_line l '
                        'WHERE period_id IN %s AND journal_id IN %s '+ self.query_get_clause+'',
                        (tuple(period_id), tuple(journal_id)))
        return self.cr.fetchone()[0] or 0.0

    def _get_start_date(self, data):
        # Wizard form value 'date_from', or '' when unset.
        if data.get('form', False) and data['form'].get('date_from', False):
            return data['form']['date_from']
        return ''

    def _get_target_move(self, data):
        # Human-readable label for the selected target-move filter.
        if data.get('form', False) and data['form'].get('target_move', False):
            if data['form']['target_move'] == 'all':
                return _('All Entries')
            return _('All Posted Entries')
        return ''

    def _get_end_date(self, data):
        # Wizard form value 'date_to', or '' when unset.
        if data.get('form', False) and data['form'].get('date_to', False):
            return data['form']['date_to']
        return ''

    def get_start_period(self, data):
        """Display name of the selected starting period, or ''."""
        if data.get('form', False) and data['form'].get('period_from', False):
            return self.pool.get('account.period').browse(self.cr,self.uid,data['form']['period_from']).name
        return ''

    def get_end_period(self, data):
        """Display name of the selected ending period, or ''."""
        if data.get('form', False) and data['form'].get('period_to', False):
            return self.pool.get('account.period').browse(self.cr, self.uid, data['form']['period_to']).name
        return ''

    def _get_account(self, data):
        """Name of the selected chart-of-accounts root, or ''."""
        if data.get('form', False) and data['form'].get('chart_account_id', False):
            return self.pool.get('account.account').browse(self.cr, self.uid, data['form']['chart_account_id']).name
        return ''

    def _get_sortby(self, data):
        # NOTE(review): raising a tuple is invalid in Python 3 and raises a
        # TypeError in modern Python 2; this was likely meant to be
        # ``raise NotImplementedError(...)`` -- confirm intended behavior.
        raise (_('Error!'), _('Not implemented.'))

    def _get_filter(self, data):
        """Translated label for the selected report filter."""
        if data.get('form', False) and data['form'].get('filter', False):
            if data['form']['filter'] == 'filter_date':
                return self._translate('Date')
            elif data['form']['filter'] == 'filter_period':
                return self._translate('Periods')
        return self._translate('No Filters')

    def _sum_debit_period(self, period_id, journal_id=None):
        """Total debit for one period over the given (or preselected) journals."""
        journals = journal_id or self.journal_ids
        if not journals:
            return 0.0
        self.cr.execute('SELECT SUM(debit) FROM account_move_line l '
                        'WHERE period_id=%s AND journal_id IN %s '+ self.query_get_clause +'',
                        (period_id, tuple(journals)))
        return self.cr.fetchone()[0] or 0.0

    def _sum_credit_period(self, period_id, journal_id=None):
        """Total credit for one period over the given (or preselected) journals."""
        journals = journal_id or self.journal_ids
        if not journals:
            return 0.0
        self.cr.execute('SELECT SUM(credit) FROM account_move_line l '
                        'WHERE period_id=%s AND journal_id IN %s ' + self.query_get_clause +' ',
                        (period_id, tuple(journals)))
        return self.cr.fetchone()[0] or 0.0

    def _get_fiscalyear(self, data):
        """Name of the selected fiscal year, or ''."""
        if data.get('form', False) and data['form'].get('fiscalyear_id', False):
            return self.pool.get('account.fiscalyear').browse(self.cr, self.uid, data['form']['fiscalyear_id']).name
        return ''

    def _get_company(self, data):
        """Company name owning the selected chart of accounts, or ''."""
        if data.get('form', False) and data['form'].get('chart_account_id', False):
            return self.pool.get('account.account').browse(self.cr, self.uid, data['form']['chart_account_id']).company_id.name
        return ''

    def _get_journal(self, data):
        """Codes of the journals selected in the wizard."""
        codes = []
        if data.get('form', False) and data['form'].get('journal_ids', False):
            self.cr.execute('select code from account_journal where id IN %s',(tuple(data['form']['journal_ids']),))
            codes = [x for x, in self.cr.fetchall()]
        return codes

    def _get_currency(self, data):
        """Currency symbol of the selected chart's company, or ''."""
        if data.get('form', False) and data['form'].get('chart_account_id', False):
            return self.pool.get('account.account').browse(self.cr, self.uid, data['form']['chart_account_id']).company_id.currency_id.symbol
        return ''
#vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
vmax-feihu/hue | refs/heads/master | desktop/core/src/desktop/lib/json_utils.py | 35 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
##
## Straight copy from simplejson 2.1.0 as we are with 2.0.9 and Python 2.4
##
class JSONEncoderForHTML(json.JSONEncoder):
    """An encoder that produces JSON safe to embed in HTML.

    To embed JSON content in, say, a script tag on a web page, the
    characters &, < and > should be escaped. They cannot be escaped
    with the usual entities because they are not expanded within
    <script> tags.
    """

    def encode(self, o):
        # Re-join iterencode() output instead of calling the parent's
        # encode(), whose performance shortcuts would bypass our escaping.
        chunks = self.iterencode(o)
        joiner = '' if self.ensure_ascii else u''
        return joiner.join(chunks)

    def iterencode(self, o, _one_shot=False):
        # Replace each dangerous character with its JSON unicode escape,
        # which decodes back to the same text.
        replacements = (('&', '\\u0026'), ('<', '\\u003c'), ('>', '\\u003e'))
        for chunk in super(JSONEncoderForHTML, self).iterencode(o):
            for raw, escaped in replacements:
                chunk = chunk.replace(raw, escaped)
            yield chunk
|
pbrod/scipy | refs/heads/master | benchmarks/benchmarks/integrate.py | 10 | from __future__ import division, absolute_import, print_function
import numpy as np
from .common import Benchmark
from scipy.integrate import quad
try:
import ctypes
import scipy.integrate._test_multivariate as clib_test
from scipy._lib import _test_ccallback_cython
except ImportError:
_test_ccallback_cython = None
try:
from scipy import LowLevelCallable
from_cython = LowLevelCallable.from_cython
except ImportError:
LowLevelCallable = lambda func, data: (func, data)
from_cython = lambda *a: a
try:
import cffi
except ImportError:
cffi = None
try:
from scipy.integrate import solve_bvp
except ImportError:
pass
class SolveBVP(Benchmark):
    """Benchmarks for scipy.integrate.solve_bvp on three two-point
    boundary value test problems."""

    # Tolerance passed to solve_bvp in every benchmark.
    TOL = 1e-5

    def fun_flow(self, x, y, p):
        # RHS of a 7-equation fluid-flow problem; p[0] is an unknown
        # parameter A determined by the boundary conditions.
        A = p[0]
        return np.vstack((
            y[1], y[2], 100 * (y[1] ** 2 - y[0] * y[2] - A),
            y[4], -100 * y[0] * y[4] - 1, y[6], -70 * y[0] * y[6]
        ))

    def bc_flow(self, ya, yb, p):
        # 8 boundary conditions (7 states + 1 unknown parameter).
        return np.array([
            ya[0], ya[1], yb[0] - 1, yb[1], ya[3], yb[3], ya[5], yb[5] - 1])

    def time_flow(self):
        x = np.linspace(0, 1, 10)
        y = np.ones((7, x.size))
        solve_bvp(self.fun_flow, self.bc_flow, x, y, p=[1], tol=self.TOL)

    def fun_peak(self, x, y):
        # Problem with a sharp interior peak controlled by eps.
        eps = 1e-3
        return np.vstack((
            y[1],
            -(4 * x * y[1] + 2 * y[0]) / (eps + x**2)
        ))

    def bc_peak(self, ya, yb):
        eps = 1e-3
        v = (1 + eps) ** -1
        return np.array([ya[0] - v, yb[0] - v])

    def time_peak(self):
        x = np.linspace(-1, 1, 5)
        y = np.zeros((2, x.size))
        solve_bvp(self.fun_peak, self.bc_peak, x, y, tol=self.TOL)

    def fun_gas(self, x, y):
        # Nonlinear gas-dynamics style problem; presumably singular as
        # alpha * y[0] -> 1 -- TODO confirm source of the problem.
        alpha = 0.8
        return np.vstack((
            y[1],
            -2 * x * y[1] * (1 - alpha * y[0]) ** -0.5
        ))

    def bc_gas(self, ya, yb):
        return np.array([ya[0] - 1, yb[0]])

    def time_gas(self):
        x = np.linspace(0, 3, 5)
        y = np.empty((2, x.size))
        y[0] = 0.5
        y[1] = -0.5
        solve_bvp(self.fun_gas, self.bc_gas, x, y, tol=self.TOL)
class Quad(Benchmark):
    """Benchmarks scipy.integrate.quad callback-dispatch overhead for
    Python, Cython, ctypes and cffi integrands (all integrating sin on
    [0, pi])."""

    def setup(self):
        from math import sin
        self.f_python = lambda x: sin(x)
        self.f_cython = from_cython(_test_ccallback_cython, "sine")
        lib = ctypes.CDLL(clib_test.__file__)
        self.f_ctypes = lib._multivariate_sin
        self.f_ctypes.restype = ctypes.c_double
        self.f_ctypes.argtypes = (ctypes.c_int, ctypes.c_double)  # sic -- for backward compat
        if cffi is not None:
            # Re-wrap the ctypes function pointer as a cffi callable.
            voidp = ctypes.cast(self.f_ctypes, ctypes.c_void_p)
            address = voidp.value
            ffi = cffi.FFI()
            self.f_cffi = LowLevelCallable(ffi.cast("double (*)(int, double *)", address))

    def time_quad_python(self):
        quad(self.f_python, 0, np.pi)

    def time_quad_cython(self):
        quad(self.f_cython, 0, np.pi)

    def time_quad_ctypes(self):
        quad(self.f_ctypes, 0, np.pi)

    def time_quad_cffi(self):
        quad(self.f_cffi, 0, np.pi)
|
SungEun-Steve-Kim/test-mp | refs/heads/master | tests/basics/bytearray1.py | 22 | print(bytearray(4))
a = bytearray([1, 2, 200])
print(type(a))
print(a[0], a[2])
print(a[-1])
print(a)
a[2] = 255
print(a[-1])
a.append(10)
print(len(a))
s = 0
for i in a:
s += i
print(s)
print(a[1:])
print(a[:-1])
print(a[2:3])
print(str(bytearray(b"123"), "utf-8"))
# Comparisons
print(bytearray([1]) == bytearray([1]))
print(bytearray([1]) == bytearray([2]))
print(bytearray([1]) == b"1")
print(b"1" == bytearray([1]))
print(bytearray() == bytearray())
# TODO: other comparisons
|
ToonTownInfiniteRepo/ToontownInfinite | refs/heads/master | toontown/catalog/CatalogSurfaceItem.py | 2 | import CatalogItem
import CatalogAtticItem
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from CatalogSurfaceColors import *
# Surface-type (ST) codes identifying which part of a house interior a
# catalog surface item applies to.
STWallpaper = 0
STMoulding = 1
STFlooring = 2
STWainscoting = 3
NUM_ST_TYPES = 4
class CatalogSurfaceItem(CatalogAtticItem.CatalogAtticItem):
    """Base class for purchasable house surfaces (wallpaper, moulding,
    flooring, wainscoting)."""

    def makeNewItem(self):
        CatalogAtticItem.CatalogAtticItem.makeNewItem(self)

    def setPatternIndex(self, patternIndex):
        # Index selecting the surface pattern.
        self.patternIndex = patternIndex

    def setColorIndex(self, colorIndex):
        # Index into the CatalogSurfaceColors table.
        self.colorIndex = colorIndex

    def saveHistory(self):
        # Surface purchases are kept in the catalog purchase history.
        return 1

    def recordPurchase(self, avatar, optional):
        """Apply the purchased surface to the avatar's house.

        Returns a CatalogAtticItem return code; the wallpaper is only
        added when the house lookup succeeded (retcode >= 0).
        """
        self.giftTag = None
        house, retcode = self.getHouseInfo(avatar)
        if retcode >= 0:
            house.addWallpaper(self)
        return retcode

    def getDeliveryTime(self):
        # Delivery delay; units defined by the catalog delivery system
        # (presumably minutes) -- TODO confirm.
        return 60
|
nfallen/servo | refs/heads/master | tests/wpt/harness/wptrunner/manifestexpected.py | 91 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import urlparse
from wptmanifest.backends import static
from wptmanifest.backends.static import ManifestItem
import expected
"""Manifest structure used to store expected results of a test.
Each manifest file is represented by an ExpectedManifest that
has one or more TestNode children, one per test in the manifest.
Each TestNode has zero or more SubtestNode children, one for each
known subtest of the test.
"""
def data_cls_getter(output_node, visited_node):
    """Map a parent output node to the class used for its children.

    The manifest tree nests ExpectedManifest -> TestNode -> SubtestNode.
    *visited_node* is part of the callback signature but intentionally
    unused.
    """
    if output_node is None:
        return ExpectedManifest
    for parent_cls, child_cls in ((ExpectedManifest, TestNode),
                                  (TestNode, SubtestNode)):
        if isinstance(output_node, parent_cls):
            return child_cls
    raise ValueError
def disabled(node):
    """Return the node's "disabled" value, or None when it isn't set."""
    result = None
    try:
        result = node.get("disabled")
    except KeyError:
        pass
    return result
def tags(node):
    """Set of tags that have been applied to the test.

    A bare string value becomes a one-element set; a missing "tags" key
    yields the empty set.
    """
    try:
        value = node.get("tags")
        if isinstance(value, (str, unicode)):
            return {value}
        return set(value)
    except KeyError:
        return set()
def prefs(node):
    """Dict of browser prefs set on the node.

    Each entry is either a "name:value" string or a bare object; bare
    objects map to a value of None.  A missing "prefs" key yields an
    empty dict.
    """
    def value(ini_value):
        if isinstance(ini_value, (str, unicode)):
            return tuple(ini_value.split(":", 1))
        else:
            return (ini_value, None)

    try:
        node_prefs = node.get("prefs")
        if isinstance(node_prefs, (str, unicode)):
            # A single string is one pref entry, not an iterable of
            # entries: the previous code parsed it into ``prefs`` and then
            # discarded that, iterating over the string's characters
            # instead, which raised ValueError for any string value.
            rv = dict([value(node_prefs)])
        else:
            rv = dict(value(item) for item in node_prefs)
    except KeyError:
        rv = {}
    return rv
class ExpectedManifest(ManifestItem):
    """Root of the expectation tree for a single test file's manifest."""

    def __init__(self, name, test_path, url_base):
        """Object representing all the tests in a particular manifest

        :param name: Name of the AST Node associated with this object.
                     Should always be None since this should always be associated with
                     the root node of the AST.
        :param test_path: Path of the test file associated with this manifest.
        :param url_base: Base url for serving the tests in this manifest
        """
        if name is not None:
            raise ValueError("ExpectedManifest should represent the root node")
        if test_path is None:
            raise ValueError("ExpectedManifest requires a test path")
        if url_base is None:
            raise ValueError("ExpectedManifest requires a base url")
        ManifestItem.__init__(self, name)
        # Map from test id -> TestNode for O(1) lookup in get_test().
        self.child_map = {}
        self.test_path = test_path
        self.url_base = url_base

    def append(self, child):
        """Add a test to the manifest"""
        ManifestItem.append(self, child)
        self.child_map[child.id] = child

    def _remove_child(self, child):
        del self.child_map[child.id]
        ManifestItem.remove_child(self, child)
        # The id map and the child list must stay in sync.
        assert len(self.child_map) == len(self.children)

    def get_test(self, test_id):
        """Get a test from the manifest by ID

        :param test_id: ID of the test to return."""
        return self.child_map.get(test_id)

    @property
    def url(self):
        # URL of the test file: url_base joined with the test path using
        # URL separators regardless of the OS path separator.
        return urlparse.urljoin(self.url_base,
                                "/".join(self.test_path.split(os.path.sep)))

    @property
    def disabled(self):
        return disabled(self)

    @property
    def tags(self):
        return tags(self)

    @property
    def prefs(self):
        return prefs(self)
class DirectoryManifest(ManifestItem):
    """Manifest node for a directory-level ini file.

    Only exposes directory-wide metadata (disabled/tags/prefs); it has no
    per-test children.
    """
    @property
    def disabled(self):
        return disabled(self)
    @property
    def tags(self):
        return tags(self)
    @property
    def prefs(self):
        return prefs(self)
class TestNode(ManifestItem):
    def __init__(self, name):
        """Tree node associated with a particular test in a manifest.

        :param name: name of the test"""
        assert name is not None
        ManifestItem.__init__(self, name)
        # Expectation data recorded during manifest update runs.
        self.updated_expected = []
        self.new_expected = []
        # Map from subtest name to SubtestNode child; populated by append().
        self.subtests = {}
        self.default_status = None
        self._from_file = True
    @property
    def is_empty(self):
        # A test node always carries a "type" key; it is "empty" (carries no
        # useful expectations) only when that is its sole data and every
        # child node is empty too.
        required_keys = set(["type"])
        if set(self._data.keys()) != required_keys:
            return False
        return all(child.is_empty for child in self.children)
    @property
    def test_type(self):
        return self.get("type")
    @property
    def id(self):
        # The test id is the test name resolved against the parent
        # manifest's url (Python 2 urlparse module).
        return urlparse.urljoin(self.parent.url, self.name)
    @property
    def disabled(self):
        return disabled(self)
    @property
    def tags(self):
        return tags(self)
    @property
    def prefs(self):
        return prefs(self)
    def append(self, node):
        """Add a subtest to the current test.

        :param node: AST Node associated with the subtest"""
        child = ManifestItem.append(self, node)
        self.subtests[child.name] = child
    def get_subtest(self, name):
        """Get the SubtestNode corresponding to a particular subtest, by
        name, or None if there is no such subtest.

        :param name: Name of the node to return"""
        if name in self.subtests:
            return self.subtests[name]
        return None
class SubtestNode(TestNode):
    def __init__(self, name):
        """Tree node associated with a particular subtest in a manifest.

        :param name: name of the subtest
        """
        TestNode.__init__(self, name)

    @property
    def is_empty(self):
        # Unlike a TestNode, a subtest carries no mandatory "type" key, so
        # it is empty exactly when it stores no data at all.
        return not self._data
def get_manifest(metadata_root, test_path, url_base, run_info):
    """Get the ExpectedManifest for a particular test path, or None if there
    is no metadata stored for that test path.

    :param metadata_root: Absolute path to the root of the metadata directory
    :param test_path: Path to the test(s) relative to the test root
    :param url_base: Base url for serving the tests in this manifest
    :param run_info: Dictionary of properties of the test run for which the
                     expectation values should be computed.
    """
    manifest_path = expected.expected_path(metadata_root, test_path)
    try:
        with open(manifest_path) as f:
            # Evaluate the manifest's conditional values against run_info and
            # build the ExpectedManifest/TestNode/SubtestNode tree.
            return static.compile(f,
                                  run_info,
                                  data_cls_getter=data_cls_getter,
                                  test_path=test_path,
                                  url_base=url_base)
    except IOError:
        # A missing metadata file simply means no expectations are stored.
        return None
def get_dir_manifest(metadata_root, path, run_info):
    """Get the DirectoryManifest for a particular ini file, or None if there
    is no metadata stored at that path.

    :param metadata_root: Absolute path to the root of the metadata directory
    :param path: Path to the ini file relative to the metadata root
    :param run_info: Dictionary of properties of the test run for which the
                     expectation values should be computed.
    """
    full_path = os.path.join(metadata_root, path)
    try:
        with open(full_path) as f:
            # Every node in a directory manifest is a DirectoryManifest.
            return static.compile(f,
                                  run_info,
                                  data_cls_getter=lambda x,y: DirectoryManifest)
    except IOError:
        # A missing metadata file simply means no expectations are stored.
        return None
|
RudyReeves/ud858-master | refs/heads/master | Lesson_2/001_Hello_Endpoints_Solutions/helloworld_api.py | 48 | """Hello World API implemented using Google Cloud Endpoints.
Contains declarations of endpoint, endpoint methods,
as well as the ProtoRPC message class and container required
for endpoint method definition.
"""
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
# If the request contains path or querystring arguments,
# you cannot use a simple Message class.
# Instead, you must use a ResourceContainer class
# Container for sayHelloByName: void request body plus a "name" parameter.
REQUEST_CONTAINER = endpoints.ResourceContainer(
    message_types.VoidMessage,
    name=messages.StringField(1),
)
# Container for greetByPeriod: "period" and "name" parameters (no explicit
# body message is passed here; presumably it defaults to VoidMessage -- see
# the endpoints.ResourceContainer docs).
REQUEST_GREETING_CONTAINER = endpoints.ResourceContainer(
    period=messages.StringField(1),
    name=messages.StringField(2),
)
# ProtoRPC package name for the messages defined in this module.
package = 'Hello'
class Hello(messages.Message):
    """ProtoRPC response message carrying a single greeting string."""
    greeting = messages.StringField(1)
@endpoints.api(name='helloworldendpoints', version='v1')
class HelloWorldApi(remote.Service):
    """Helloworld API v1: three GET endpoints returning greeting messages."""
    # GET /sayHello -> {"greeting": "Hello World"}
    @endpoints.method(message_types.VoidMessage, Hello,
      path = "sayHello", http_method='GET', name = "sayHello")
    def say_hello(self, request):
        return Hello(greeting="Hello World")
    # GET /sayHelloByName?name=X -> {"greeting": "Hello X"}
    @endpoints.method(REQUEST_CONTAINER, Hello,
      path = "sayHelloByName", http_method='GET', name = "sayHelloByName")
    def say_hello_by_name(self, request):
        greet = "Hello {}".format(request.name)
        return Hello(greeting=greet)
    # GET /greetByPeriod?period=X&name=Y -> {"greeting": "Good X Y"}
    @endpoints.method(REQUEST_GREETING_CONTAINER, Hello,
      path = "greetByPeriod", http_method='GET', name = "greetByPeriod")
    def greet_by_period(self, request):
        greet = "Good {} {}".format(request.period, request.name)
        return Hello(greeting=greet)
APPLICATION = endpoints.api_server([HelloWorldApi])
|
cc272309126/panda3d | refs/heads/master | direct/src/showbase/MessengerLeakDetector.py | 12 | from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.showbase.DirectObject import DirectObject
from direct.showbase.Job import Job
import gc, __builtin__
class MessengerLeakObject(DirectObject):
    """Test object whose only lasting reference is a messenger subscription.

    Used by _leakMessengerObject below to create a deliberate
    messenger-only reference for the leak detector to find.
    """
    def __init__(self):
        self.accept('leakEvent', self._handleEvent)
    def _handleEvent(self):
        # No-op handler; only the accept() registration matters.
        pass
def _leakMessengerObject():
    # Create an object and immediately drop the local reference; after this
    # returns, only the messenger's 'leakEvent' subscription keeps it alive,
    # which is exactly the condition MessengerLeakDetector reports.
    leakObject = MessengerLeakObject()
class MessengerLeakDetector(Job):
    # check for objects that are only referenced by the messenger
    # and would otherwise be garbage collected
    notify = directNotify.newCategory("MessengerLeakDetector")
    def __init__(self, name):
        Job.__init__(self, name)
        self.setPriority(Job.Priorities.Normal*2)
        # Register with the global job manager so run() is driven
        # incrementally via its generator protocol.
        jobMgr.add(self)
    def run(self):
        """Generator job body: breadth-first search from every object the
        messenger knows about, looking for a reference path back to
        __builtin__ that does not go through the messenger itself. Objects
        with no such path are reported as leaks. Yields None frequently so
        the job scheduler can timeslice the (potentially huge) search.
        """
        # set of ids of objects that we know are always attached to builtin;
        # if an object is attached to one of these, it's attached to builtin
        # this cuts down on the amount of searching that needs to be done
        builtinIds = set()
        builtinIds.add(id(__builtin__.__dict__))
        # base/simbase/uber are environment-specific globals (client, AI,
        # uberdog); any subset may exist, hence the blanket try/excepts.
        try:
            builtinIds.add(id(base))
            builtinIds.add(id(base.cr))
            builtinIds.add(id(base.cr.doId2do))
        except:
            pass
        try:
            builtinIds.add(id(simbase))
            builtinIds.add(id(simbase.air))
            builtinIds.add(id(simbase.air.doId2do))
        except:
            pass
        try:
            builtinIds.add(id(uber))
            builtinIds.add(id(uber.air))
            builtinIds.add(id(uber.air.doId2do))
        except:
            pass
        while True:
            yield None
            objects = messenger._Messenger__objectEvents.keys()
            assert self.notify.debug('%s objects in the messenger' % len(objects))
            for object in objects:
                yield None
                assert self.notify.debug('---> new object: %s' % itype(object))
                # try to find a path to builtin that doesn't involve the messenger
                # lists of objects for breadth-first search
                # iterate through one list while populating other list
                objList1 = []
                objList2 = []
                curObjList = objList1
                nextObjList = objList2
                visitedObjIds = set()
                # add the id of the object, and the messenger containers so that
                # the search for builtin will stop at the messenger; we're looking
                # for any path to builtin that don't involve the messenger
                visitedObjIds.add(id(object))
                visitedObjIds.add(id(messenger._Messenger__objectEvents))
                visitedObjIds.add(id(messenger._Messenger__callbacks))
                nextObjList.append(object)
                foundBuiltin = False
                # breadth-first search, go until you run out of new objects or you find __builtin__
                while len(nextObjList):
                    if foundBuiltin:
                        break
                    # swap the lists, prepare for the next pass
                    curObjList = nextObjList
                    nextObjList = []
                    assert self.notify.debug('next search iteration, num objects: %s' % len(curObjList))
                    for curObj in curObjList:
                        if foundBuiltin:
                            break
                        yield None
                        referrers = gc.get_referrers(curObj)
                        assert self.notify.debug('curObj: %s @ %s, %s referrers, repr=%s' % (
                            itype(curObj), hex(id(curObj)), len(referrers), fastRepr(curObj, maxLen=2)))
                        for referrer in referrers:
                            #assert self.notify.debug('referrer: %s' % itype(curObj))
                            yield None
                            refId = id(referrer)
                            # don't go in a loop
                            if refId in visitedObjIds:
                                #assert self.notify.debug('already visited')
                                continue
                            # don't self-reference
                            if referrer is curObjList or referrer is nextObjList:
                                continue
                            if refId in builtinIds:
                                # not a leak, there is a path to builtin that does not involve the messenger
                                #assert self.notify.debug('object has another path to __builtin__, it\'s not a messenger leak')
                                foundBuiltin = True
                                break
                            else:
                                visitedObjIds.add(refId)
                                nextObjList.append(referrer)
                        if not foundBuiltin:
                            self.notify.warning(
                                '%s is referenced only by the messenger' % (itype(object)))
joshuajan/odoo | refs/heads/master | addons/product_email_template/models/product.py | 379 | # -*- coding: utf-8 -*-
from openerp.osv import fields, osv
class product_template(osv.Model):
    """ Product Template inheritance to add an optional email.template to a
    product.template. When validating an invoice, an email will be sent to the
    customer based on this template. The customer will receive an email for each
    product linked to an email template. """
    _inherit = "product.template"
    _columns = {
        'email_template_id': fields.many2one(
            'email.template', 'Product Email Template',
            # Trailing spaces matter here: adjacent string literals are
            # concatenated, and previously the tooltip ran words together
            # ("customerbased", "eachproduct").
            help='When validating an invoice, an email will be sent to the customer '
                 'based on this template. The customer will receive an email for each '
                 'product linked to an email template.'),
    }
|
nnethercote/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/third_party/webencodings/webencodings/x_user_defined.py | 171 | # coding: utf-8
"""
webencodings.x_user_defined
~~~~~~~~~~~~~~~~~~~~~~~~~~~
An implementation of the x-user-defined encoding.
:copyright: Copyright 2012 by Simon Sapin
:license: BSD, see LICENSE for details.
"""
from __future__ import unicode_literals
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless encoder/decoder pair backed by this module's charmap tables."""
    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_table)
    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; charmap encoding keeps no state between calls,
    so `final` is ignored."""
    def encode(self, input, final=False):
        # charmap_encode returns (output, length consumed); only the output
        # is needed here.
        return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; every byte maps to exactly one code point, so no
    cross-call state is required and `final` is ignored."""
    def decode(self, input, final=False):
        # charmap_decode returns (output, length consumed); only the output
        # is needed here.
        return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
    # Inherits encode() from Codec; no extra stream state is needed.
    pass
class StreamReader(Codec, codecs.StreamReader):
    # Inherits decode() from Codec; no extra stream state is needed.
    pass
### encodings module API
# CodecInfo bundle in the shape expected by Python's codec machinery.
codec_info = codecs.CodecInfo(
    name='x-user-defined',
    encode=Codec().encode,
    decode=Codec().decode,
    incrementalencoder=IncrementalEncoder,
    incrementaldecoder=IncrementalDecoder,
    streamreader=StreamReader,
    streamwriter=StreamWriter,
)
### Decoding Table
# Python 3:
# for c in range(256): print(' %r' % chr(c if c < 128 else c + 0xF700))
# Build the table programmatically instead of spelling out 256 literals:
# ASCII bytes (0x00-0x7F) map to themselves, bytes 0x80-0xFF map into the
# private-use area at U+F780-U+F7FF (byte value + 0xF700), exactly as the
# generator comment above describes.
_chr = unichr if str is bytes else chr  # unichr on Python 2, chr on Python 3
decoding_table = ''.join(_chr(c if c < 128 else c + 0xF700) for c in range(256))
del _chr
### Encoding table
# Inverse charmap (code point -> byte) derived from the decoding table.
encoding_table = codecs.charmap_build(decoding_table)
|
piskvorky/gensim | refs/heads/develop | docs/src/auto_examples/core/run_similarity_queries.py | 8 | r"""
Similarity Queries
==================
Demonstrates querying a corpus for similar documents.
"""
import logging
# Show gensim's INFO-level progress messages while the tutorial runs.
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
###############################################################################
#
# Creating the Corpus
# -------------------
#
# First, we need to create a corpus to work with.
# This step is the same as in the previous tutorial;
# if you completed it, feel free to skip to the next section.
from collections import defaultdict
from gensim import corpora
# Nine short document titles (from Deerwester et al.'s LSA paper).
documents = [
    "Human machine interface for lab abc computer applications",
    "A survey of user opinion of computer system response time",
    "The EPS user interface management system",
    "System and human system engineering testing of EPS",
    "Relation of user perceived response time to error measurement",
    "The generation of random binary unordered trees",
    "The intersection graph of paths in trees",
    "Graph minors IV Widths of trees and well quasi ordering",
    "Graph minors A survey",
]
# remove common words and tokenize
stoplist = set('for a of the and to in'.split())
texts = [
    [word for word in document.lower().split() if word not in stoplist]
    for document in documents
]
# remove words that appear only once
frequency = defaultdict(int)
for text in texts:
    for token in text:
        frequency[token] += 1
texts = [
    [token for token in text if frequency[token] > 1]
    for text in texts
]
# Map each surviving token to an integer id, then represent every document
# as a sparse bag-of-words vector of (token_id, count) pairs.
dictionary = corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
###############################################################################
# Similarity interface
# --------------------
#
# In the previous tutorials on
# :ref:`sphx_glr_auto_examples_core_run_corpora_and_vector_spaces.py`
# and
# :ref:`sphx_glr_auto_examples_core_run_topics_and_transformations.py`,
# we covered what it means to create a corpus in the Vector Space Model and how
# to transform it between different vector spaces. A common reason for such a
# charade is that we want to determine **similarity between pairs of
# documents**, or the **similarity between a specific document and a set of
# other documents** (such as a user query vs. indexed documents).
#
# To show how this can be done in gensim, let us consider the same corpus as in the
# previous examples (which really originally comes from Deerwester et al.'s
# `"Indexing by Latent Semantic Analysis" <http://www.cs.bham.ac.uk/~pxt/IDA/lsa_ind.pdf>`_
# seminal 1990 article).
# To follow Deerwester's example, we first use this tiny corpus to define a 2-dimensional
# LSI space:
from gensim import models
# Project the bag-of-words corpus into a 2-dimensional LSI topic space.
lsi = models.LsiModel(corpus, id2word=dictionary, num_topics=2)
###############################################################################
# For the purposes of this tutorial, there are only two things you need to know about LSI.
# First, it's just another transformation: it transforms vectors from one space to another.
# Second, the benefit of LSI is that enables identifying patterns and relationships between terms (in our case, words in a document) and topics.
# Our LSI space is two-dimensional (`num_topics = 2`) so there are two topics, but this is arbitrary.
# If you're interested, you can read more about LSI here: `Latent Semantic Indexing <https://en.wikipedia.org/wiki/Latent_semantic_indexing>`_:
#
# Now suppose a user typed in the query `"Human computer interaction"`. We would
# like to sort our nine corpus documents in decreasing order of relevance to this query.
# Unlike modern search engines, here we only concentrate on a single aspect of possible
# similarities---on apparent semantic relatedness of their texts (words). No hyperlinks,
# no random-walk static ranks, just a semantic extension over the boolean keyword match:
doc = "Human computer interaction"
vec_bow = dictionary.doc2bow(doc.lower().split())
vec_lsi = lsi[vec_bow] # convert the query to LSI space
print(vec_lsi)
###############################################################################
# In addition, we will be considering `cosine similarity <http://en.wikipedia.org/wiki/Cosine_similarity>`_
# to determine the similarity of two vectors. Cosine similarity is a standard measure
# in Vector Space Modeling, but wherever the vectors represent probability distributions,
# `different similarity measures <http://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence#Symmetrised_divergence>`_
# may be more appropriate.
#
# Initializing query structures
# ++++++++++++++++++++++++++++++++
#
# To prepare for similarity queries, we need to enter all documents which we want
# to compare against subsequent queries. In our case, they are the same nine documents
# used for training LSI, converted to 2-D LSA space. But that's only incidental, we
# might also be indexing a different corpus altogether.
from gensim import similarities
# Dense in-memory index over the LSI-transformed corpus (see the warning
# below about memory use for large corpora).
index = similarities.MatrixSimilarity(lsi[corpus])  # transform corpus to LSI space and index it
###############################################################################
# .. warning::
# The class :class:`similarities.MatrixSimilarity` is only appropriate when the whole
# set of vectors fits into memory. For example, a corpus of one million documents
# would require 2GB of RAM in a 256-dimensional LSI space, when used with this class.
#
# Without 2GB of free RAM, you would need to use the :class:`similarities.Similarity` class.
# This class operates in fixed memory, by splitting the index across multiple files on disk, called shards.
# It uses :class:`similarities.MatrixSimilarity` and :class:`similarities.SparseMatrixSimilarity` internally,
# so it is still fast, although slightly more complex.
#
# Index persistency is handled via the standard :func:`save` and :func:`load` functions:
# Persist the index to disk and load it back again.
index.save('/tmp/deerwester.index')
index = similarities.MatrixSimilarity.load('/tmp/deerwester.index')
###############################################################################
# This is true for all similarity indexing classes (:class:`similarities.Similarity`,
# :class:`similarities.MatrixSimilarity` and :class:`similarities.SparseMatrixSimilarity`).
# Also in the following, `index` can be an object of any of these. When in doubt,
# use :class:`similarities.Similarity`, as it is the most scalable version, and it also
# supports adding more documents to the index later.
#
# Performing queries
# ++++++++++++++++++
#
# To obtain similarities of our query document against the nine indexed documents:
# Cosine similarity of the query against each of the nine indexed documents.
sims = index[vec_lsi]  # perform a similarity query against the corpus
print(list(enumerate(sims)))  # print (document_number, document_similarity) 2-tuples
###############################################################################
# Cosine measure returns similarities in the range `<-1, 1>` (the greater, the more similar),
# so that the first document has a score of 0.99809301 etc.
#
# With some standard Python magic we sort these similarities into descending
# order, and obtain the final answer to the query `"Human computer interaction"`:
# Rank documents by decreasing similarity to the query. Python's sort is
# stable in both the negated-key and reverse=True forms, so ties keep their
# original (document id) order either way.
sims = sorted(enumerate(sims), key=lambda item: item[1], reverse=True)
for doc_position, doc_score in sims:
    print(doc_score, documents[doc_position])
###############################################################################
# The thing to note here is that documents no. 2 (``"The EPS user interface management system"``)
# and 4 (``"Relation of user perceived response time to error measurement"``) would never be returned by
# a standard boolean fulltext search, because they do not share any common words with ``"Human
# computer interaction"``. However, after applying LSI, we can observe that both of
# them received quite high similarity scores (no. 2 is actually the most similar!),
# which corresponds better to our intuition of
# them sharing a "computer-human" related topic with the query. In fact, this semantic
# generalization is the reason why we apply transformations and do topic modelling
# in the first place.
#
# Where next?
# ------------
#
# Congratulations, you have finished the tutorials -- now you know how gensim works :-)
# To delve into more details, you can browse through the :ref:`apiref`,
# see the :ref:`wiki` or perhaps check out :ref:`distributed` in `gensim`.
#
# Gensim is a fairly mature package that has been used successfully by many individuals and companies, both for rapid prototyping and in production.
# That doesn't mean it's perfect though:
#
# * there are parts that could be implemented more efficiently (in C, for example), or make better use of parallelism (multiple machines cores)
# * new algorithms are published all the time; help gensim keep up by `discussing them <http://groups.google.com/group/gensim>`_ and `contributing code <https://github.com/piskvorky/gensim/wiki/Developer-page>`_
# * your **feedback is most welcome** and appreciated (and it's not just the code!):
# `bug reports <https://github.com/piskvorky/gensim/issues>`_ or
# `user stories and general questions <http://groups.google.com/group/gensim/topics>`_.
#
# Gensim has no ambition to become an all-encompassing framework, across all NLP (or even Machine Learning) subfields.
# Its mission is to help NLP practitioners try out popular topic modelling algorithms
# on large datasets easily, and to facilitate prototyping of new algorithms for researchers.
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Display the tutorial's companion image saved alongside this script.
img = mpimg.imread('run_similarity_queries.png')
imgplot = plt.imshow(img)
_ = plt.axis('off')
|
davidandreoletti/loggingapi-core-lib | refs/heads/master | scripts/google-cpp-style-guide/cpplint.py | 3 | #!/usr/bin/python2.4
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Here are some issues that I've had people identify in my code during reviews,
# that I think are possible to flag automatically in a lint tool. If these were
# caught by lint, it would save time both for myself and that of my reviewers.
# Most likely, some of these are beyond the scope of the current lint framework,
# but I think it is valuable to retain these wish-list items even if they cannot
# be immediately implemented.
#
# Suggestions
# -----------
# - Check for no 'explicit' for multi-arg ctor
# - Check for boolean assign RHS in parens
# - Check for ctor initializer-list colon position and spacing
# - Check that if there's a ctor, there should be a dtor
# - Check accessors that return non-pointer member variables are
# declared const
# - Check accessors that return non-const pointer member vars are
# *not* declared const
# - Check for using public includes for testing
# - Check for spaces between brackets in one-line inline method
# - Check for no assert()
# - Check for spaces surrounding operators
# - Check for 0 in pointer context (should be NULL)
# - Check for 0 in char context (should be '\0')
# - Check for camel-case method name conventions for methods
# that are not simple inline getters and setters
# - Check that base classes have virtual destructors
# put " // namespace" after } that closes a namespace, with
# namespace's name after 'namespace' if it is named.
# - Do not indent namespace contents
# - Avoid inlining non-trivial constructors in header files
# include base/basictypes.h if DISALLOW_EVIL_CONSTRUCTORS is used
# - Check for old-school (void) cast for call-sites of functions
# ignored return value
# - Check gUnit usage of anonymous namespace
# - Check for class declaration order (typedefs, consts, enums,
# ctor(s?), dtor, friend declarations, methods, member vars)
#
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It does also not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import getopt
import math # for log
import os
import re
import sre_compile
import string
import sys
import unicodedata
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
[--counting=total|toplevel|detailed]
<file> [file] ...
The style guidelines this tries to follow are those in
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Linted extensions are .cc, .cpp, and .h. Other file types will be ignored.
Flags:
output=vs7
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Other formats are unsupported.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
"""
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
# \ used for clearer layout -- pylint: disable-msg=C6013
_ERROR_CATEGORIES = [
  'build/class',
  'build/deprecated',
  'build/endif_comment',
  'build/explicit_make_pair',
  'build/forward_decl',
  'build/header_guard',
  'build/include',
  'build/include_alpha',
  'build/include_order',
  'build/include_what_you_use',
  'build/namespaces',
  'build/printf_format',
  'build/storage_class',
  'legal/copyright',
  'readability/braces',
  'readability/casting',
  'readability/check',
  'readability/constructors',
  'readability/fn_size',
  'readability/function',
  'readability/multiline_comment',
  'readability/multiline_string',
  'readability/nolint',
  'readability/streams',
  'readability/todo',
  'readability/utf8',
  'runtime/arrays',
  'runtime/casting',
  'runtime/explicit',
  'runtime/int',
  'runtime/init',
  'runtime/invalid_increment',
  'runtime/member_string_references',
  'runtime/memset',
  'runtime/operator',
  'runtime/printf',
  'runtime/printf_format',
  'runtime/references',
  'runtime/rtti',
  'runtime/sizeof',
  'runtime/string',
  'runtime/threadsafe_fn',
  'runtime/virtual',
  'whitespace/blank_line',
  'whitespace/braces',
  'whitespace/comma',
  'whitespace/comments',
  'whitespace/end_of_line',
  'whitespace/ending_newline',
  'whitespace/indent',
  'whitespace/labels',
  'whitespace/line_length',
  'whitespace/newline',
  'whitespace/operators',
  'whitespace/parens',
  'whitespace/semicolon',
  'whitespace/tab',
  'whitespace/todo'
  ]
# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add here categories that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
# All entries here should start with a '-' or '+', as in the --filter= flag.
_DEFAULT_FILTERS = ['-build/include_alpha']
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# Headers that we consider STL headers.
_STL_HEADERS = frozenset([
    'algobase.h', 'algorithm', 'alloc.h', 'bitset', 'deque', 'exception',
    'function.h', 'functional', 'hash_map', 'hash_map.h', 'hash_set',
    'hash_set.h', 'iterator', 'list', 'list.h', 'map', 'memory', 'new',
    'pair.h', 'pthread_alloc', 'queue', 'set', 'set.h', 'sstream', 'stack',
    'stl_alloc.h', 'stl_relops.h', 'type_traits.h',
    'utility', 'vector', 'vector.h',
    ])
# Non-STL C++ system headers.
_CPP_HEADERS = frozenset([
    'algo.h', 'builtinbuf.h', 'bvector.h', 'cassert', 'cctype',
    'cerrno', 'cfloat', 'ciso646', 'climits', 'clocale', 'cmath',
    'complex', 'complex.h', 'csetjmp', 'csignal', 'cstdarg', 'cstddef',
    'cstdio', 'cstdlib', 'cstring', 'ctime', 'cwchar', 'cwctype',
    'defalloc.h', 'deque.h', 'editbuf.h', 'exception', 'fstream',
    'fstream.h', 'hashtable.h', 'heap.h', 'indstream.h', 'iomanip',
    'iomanip.h', 'ios', 'iosfwd', 'iostream', 'iostream.h', 'istream',
    'istream.h', 'iterator.h', 'limits', 'map.h', 'multimap.h', 'multiset.h',
    'numeric', 'ostream', 'ostream.h', 'parsestream.h', 'pfstream.h',
    'PlotFile.h', 'procbuf.h', 'pthread_alloc.h', 'rope', 'rope.h',
    'ropeimpl.h', 'SFile.h', 'slist', 'slist.h', 'stack.h', 'stdexcept',
    'stdiostream.h', 'streambuf.h', 'stream.h', 'strfile.h', 'string',
    'strstream', 'strstream.h', 'tempbuf.h', 'tree.h', 'typeinfo', 'valarray',
    ])
# Assertion macros. These are defined in base/logging.h and
# testing/base/gunit.h. Note that the _M versions need to come first
# for substring matching to work.
_CHECK_MACROS = [
    'DCHECK', 'CHECK',
    'EXPECT_TRUE_M', 'EXPECT_TRUE',
    'ASSERT_TRUE_M', 'ASSERT_TRUE',
    'EXPECT_FALSE_M', 'EXPECT_FALSE',
    'ASSERT_FALSE_M', 'ASSERT_FALSE',
    ]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
# Maps macro name -> {comparison operator -> suggested _EQ/_NE/... macro}.
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
                        ('>=', 'GE'), ('>', 'GT'),
                        ('<=', 'LE'), ('<', 'LT')]:
  _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
  _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
  _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
  _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
  _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
  _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
# The FALSE variants assert the negation, so the operator is inverted.
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
                            ('>=', 'LT'), ('>', 'LE'),
                            ('<=', 'GT'), ('<', 'GE')]:
  _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
  _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
  _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
  _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_LIKELY_MY_HEADER = 3
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5
# Cache of compiled regexps, keyed by pattern string; used by Match() below.
_regexp_compile_cache = {}
# Finds occurrences of NOLINT or NOLINT(...).
_RE_SUPPRESSION = re.compile(r'\bNOLINT\b(\([^)]*\))?')
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
def ParseNolintSuppressions(filename, raw_line, linenum, error):
  """Updates the global list of error-suppressions.

  Parses any NOLINT comments on the current line, updating the global
  error_suppressions store.  Reports an error if the NOLINT comment
  was malformed.

  Args:
    filename: str, the name of the input file.
    raw_line: str, the line of input text, with comments.
    linenum: int, the number of the current line.
    error: function, an error handler.
  """
  # FIXME(adonovan): "NOLINT(" is misparsed as NOLINT(*).
  match = _RE_SUPPRESSION.search(raw_line)
  if not match:
    return
  category = match.group(1)
  if category is None or category == '(*)':  # => "suppress all"
    _error_suppressions.setdefault(None, set()).add(linenum)
    return
  # A specific category must be fully parenthesized to be recognized.
  if not (category.startswith('(') and category.endswith(')')):
    return
  name = category[1:-1]
  if name in _ERROR_CATEGORIES:
    _error_suppressions.setdefault(name, set()).add(linenum)
  else:
    error(filename, linenum, 'readability/nolint', 5,
          'Unknown NOLINT error category: %s' % name)
def ResetNolintSuppressions():
  """Resets the set of NOLINT suppressions to empty."""
  # Mutates the module-level _error_suppressions map in place so that
  # suppressions never leak from one linted file into the next.
  _error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
  """Returns true if the specified error category is suppressed on this line.

  Consults the global error_suppressions map populated by
  ParseNolintSuppressions/ResetNolintSuppressions.

  Args:
    category: str, the category of the error.
    linenum: int, the current line number.
  Returns:
    bool, True iff the error should be suppressed due to a NOLINT comment.
  """
  # A line suppresses this error if it carries either a category-specific
  # NOLINT or a blanket NOLINT / NOLINT(*) (stored under the None key).
  category_lines = _error_suppressions.get(category, set())
  blanket_lines = _error_suppressions.get(None, set())
  return linenum in category_lines or linenum in blanket_lines
def Match(pattern, s):
  """Matches the string with the pattern, caching the compiled regexp.

  Args:
    pattern: str, an uncompiled regular expression.
    s: str, the string to match at its beginning.

  Returns:
    A match object, or None if the pattern does not match the start of s.
  """
  # The regexp compilation caching is inlined in both Match and Search for
  # performance reasons; factoring it out into a separate function turns out
  # to be noticeably expensive.
  # PEP 8 idiom fix: use "x not in y" instead of "not x in y".
  if pattern not in _regexp_compile_cache:
    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
  return _regexp_compile_cache[pattern].match(s)
def Search(pattern, s):
  """Searches the string for the pattern, caching the compiled regexp.

  Args:
    pattern: str, an uncompiled regular expression.
    s: str, the string to search.

  Returns:
    A match object for the first occurrence, or None if not found.
  """
  # PEP 8 idiom fix: use "x not in y" instead of "not x in y".
  if pattern not in _regexp_compile_cache:
    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
  return _regexp_compile_cache[pattern].search(s)
class _IncludeState(dict):
  """Tracks line numbers for includes, and the order in which includes appear.

  As a dict, an _IncludeState object serves as a mapping between include
  filename and line number on which that file was included.

  Call CheckNextIncludeOrder() once for each header in the file, passing
  in the type constants defined above. Calls in an illegal order will
  raise an _IncludeError with an appropriate error message.
  """
  # self._section will move monotonically through this set. If it ever
  # needs to move backwards, CheckNextIncludeOrder will raise an error.
  _INITIAL_SECTION = 0
  _MY_H_SECTION = 1
  _C_SECTION = 2
  _CPP_SECTION = 3
  _OTHER_H_SECTION = 4

  _TYPE_NAMES = {
      _C_SYS_HEADER: 'C system header',
      _CPP_SYS_HEADER: 'C++ system header',
      _LIKELY_MY_HEADER: 'header this file implements',
      _POSSIBLE_MY_HEADER: 'header this file may implement',
      _OTHER_HEADER: 'other header',
      }
  _SECTION_NAMES = {
      _INITIAL_SECTION: "... nothing. (This can't be an error.)",
      _MY_H_SECTION: 'a header this file implements',
      _C_SECTION: 'C system header',
      _CPP_SECTION: 'C++ system header',
      _OTHER_H_SECTION: 'other header',
      }

  def __init__(self):
    dict.__init__(self)
    # Which section of the include order we are currently inside.
    self._section = self._INITIAL_SECTION
    # Canonicalized path of the previous header, for alphabetical checks.
    self._last_header = ''

  def CanonicalizeAlphabeticalOrder(self, header_path):
    """Returns a path canonicalized for alphabetical comparison.

    - replaces "-" with "_" so they both cmp the same.
    - removes '-inl' since we don't require them to be after the main header.
    - lowercase everything, just in case.

    Args:
      header_path: Path to be canonicalized.

    Returns:
      Canonicalized path.
    """
    # Order matters here: strip '-inl.h' before mapping '-' to '_'.
    canonical = header_path.replace('-inl.h', '.h')
    canonical = canonical.replace('-', '_')
    return canonical.lower()

  def IsInAlphabeticalOrder(self, header_path):
    """Check if a header is in alphabetical order with the previous header.

    Args:
      header_path: Header to be checked.

    Returns:
      Returns true if the header is in alphabetical order.
    """
    canonical = self.CanonicalizeAlphabeticalOrder(header_path)
    in_order = canonical >= self._last_header
    if in_order:
      # Only advance the high-water mark when order was preserved.
      self._last_header = canonical
    return in_order

  def CheckNextIncludeOrder(self, header_type):
    """Returns a non-empty error message if the next header is out of order.

    This function also updates the internal state to be ready to check
    the next include.

    Args:
      header_type: One of the _XXX_HEADER constants defined above.

    Returns:
      The empty string if the header is in the right order, or an
      error message describing what's wrong.
    """
    error_message = ('Found %s after %s' %
                     (self._TYPE_NAMES[header_type],
                      self._SECTION_NAMES[self._section]))

    last_section = self._section
    if header_type in (_C_SYS_HEADER, _CPP_SYS_HEADER):
      # A system header is an error once a later section has begun.
      target = (self._C_SECTION if header_type == _C_SYS_HEADER
                else self._CPP_SECTION)
      if self._section > target:
        self._last_header = ''
        return error_message
      self._section = target
    elif header_type in (_LIKELY_MY_HEADER, _POSSIBLE_MY_HEADER):
      # Headers that might belong to this file are never an ordering error;
      # past the "my header" section they simply count as other headers.
      if self._section <= self._MY_H_SECTION:
        self._section = self._MY_H_SECTION
      else:
        self._section = self._OTHER_H_SECTION
    else:
      assert header_type == _OTHER_HEADER
      self._section = self._OTHER_H_SECTION

    if last_section != self._section:
      # Entering a new section restarts the alphabetical-order tracking.
      self._last_header = ''

    return ''
class _CppLintState(object):
"""Maintains module-wide state.."""
def __init__(self):
self.verbose_level = 1 # global setting.
self.error_count = 0 # global count of reported errors
# filters to apply when emitting error messages
self.filters = _DEFAULT_FILTERS[:]
self.counting = 'total' # In what way are we counting errors?
self.errors_by_category = {} # string to int dict storing error counts
# output format:
# "emacs" - format that emacs can parse (default)
# "vs7" - format that Microsoft Visual Studio 7 can parse
self.output_format = 'emacs'
def SetOutputFormat(self, output_format):
"""Sets the output format for errors."""
self.output_format = output_format
def SetVerboseLevel(self, level):
"""Sets the module's verbosity, and returns the previous setting."""
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level
def SetCountingStyle(self, counting_style):
"""Sets the module's counting options."""
self.counting = counting_style
def SetFilters(self, filters):
"""Sets the error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "+whitespace/indent").
Each filter should start with + or -; else we die.
Raises:
ValueError: The comma-separated filters did not all start with '+' or '-'.
E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
"""
# Default filters always have less priority than the flag ones.
self.filters = _DEFAULT_FILTERS[:]
for filt in filters.split(','):
clean_filt = filt.strip()
if clean_filt:
self.filters.append(clean_filt)
for filt in self.filters:
if not (filt.startswith('+') or filt.startswith('-')):
raise ValueError('Every filter in --filters must start with + or -'
' (%s does not)' % filt)
def ResetErrorCounts(self):
"""Sets the module's error statistic back to zero."""
self.error_count = 0
self.errors_by_category = {}
def IncrementErrorCount(self, category):
"""Bumps the module's error statistic."""
self.error_count += 1
if self.counting in ('toplevel', 'detailed'):
if self.counting != 'detailed':
category = category.split('/')[0]
if category not in self.errors_by_category:
self.errors_by_category[category] = 0
self.errors_by_category[category] += 1
def PrintErrorCounts(self):
"""Print a summary of errors by category, and the total."""
for category, count in self.errors_by_category.iteritems():
sys.stderr.write('Category \'%s\' errors found: %d\n' %
(category, count))
sys.stderr.write('Total errors found: %d\n' % self.error_count)
# The single module-wide lint state; the thin wrappers below expose it to
# the rest of the module as plain functions.
_cpplint_state = _CppLintState()
def _OutputFormat():
  """Gets the module's output format."""
  return _cpplint_state.output_format
def _SetOutputFormat(output_format):
  """Sets the module's output format."""
  _cpplint_state.SetOutputFormat(output_format)
def _VerboseLevel():
  """Returns the module's verbosity setting."""
  return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
  """Sets the module's verbosity, and returns the previous setting."""
  return _cpplint_state.SetVerboseLevel(level)
def _SetCountingStyle(level):
  """Sets the module's counting options."""
  _cpplint_state.SetCountingStyle(level)
def _Filters():
  """Returns the module's list of output filters, as a list."""
  return _cpplint_state.filters
def _SetFilters(filters):
  """Sets the module's error-message filters.
  These filters are applied when deciding whether to emit a given
  error message.
  Args:
    filters: A string of comma-separated filters (eg "whitespace/indent").
             Each filter should start with + or -; else we die.
  """
  _cpplint_state.SetFilters(filters)
class _FunctionState(object):
"""Tracks current function name and the number of lines in its body."""
_NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
_TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
def __init__(self):
self.in_a_function = False
self.lines_in_function = 0
self.current_function = ''
def Begin(self, function_name):
"""Start analyzing function body.
Args:
function_name: The name of the function being tracked.
"""
self.in_a_function = True
self.lines_in_function = 0
self.current_function = function_name
def Count(self):
"""Count line in current function body."""
if self.in_a_function:
self.lines_in_function += 1
def Check(self, error, filename, linenum):
"""Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check.
"""
if Match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = base_trigger * 2**_VerboseLevel()
if self.lines_in_function > trigger:
error_level = int(math.log(self.lines_in_function / base_trigger, 2))
# 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
if error_level > 5:
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
' %s has %d non-comment lines'
' (error triggered by exceeding %d lines).' % (
self.current_function, self.lines_in_function, trigger))
def End(self):
"""Stop analyzing function body."""
self.in_a_function = False
class _IncludeError(Exception):
  """Indicates a problem with the include order in a file."""
  # Per _IncludeState's class docstring, this is raised when
  # CheckNextIncludeOrder() is invoked with headers in an illegal order;
  # it carries only the human-readable message.
  pass
class FileInfo:
  """Provides utility functions for filenames.

  FileInfo provides easy access to the components of a file's path
  relative to the project root.
  """

  def __init__(self, filename):
    self._filename = filename

  def FullName(self):
    """Make Windows paths like Unix."""
    return os.path.abspath(self._filename).replace('\\', '/')

  def RepositoryName(self):
    """FullName after removing the local path to the repository.

    If we have a real absolute path name here we can try to do something smart:
    detecting the root of the checkout and truncating /path/to/checkout from
    the name so that we get header guards that don't include things like
    "C:\Documents and Settings\..." or "/home/username/..." in them and thus
    people on different computers who have checked the source out to different
    locations won't see bogus errors.
    """
    fullname = self.FullName()
    if os.path.exists(fullname):
      project_dir = os.path.dirname(fullname)

      if os.path.exists(os.path.join(project_dir, ".svn")):
        # Classic SVN (<= 1.6) puts a .svn directory at every level, so walk
        # upward while the parent directory still contains one.
        root_dir = project_dir
        one_up_dir = os.path.dirname(root_dir)
        while os.path.exists(os.path.join(one_up_dir, ".svn")):
          root_dir = os.path.dirname(root_dir)
          one_up_dir = os.path.dirname(one_up_dir)
        prefix = os.path.commonprefix([root_dir, project_dir])
        return fullname[len(prefix) + 1:]

      # Not SVN <= 1.6?  Try to find a git, hg, or svn top level directory by
      # searching up from the current path.
      def _has_vcs_marker(directory):
        # True when the directory looks like a checkout root.
        return (os.path.exists(os.path.join(directory, ".git")) or
                os.path.exists(os.path.join(directory, ".hg")) or
                os.path.exists(os.path.join(directory, ".svn")))

      root_dir = os.path.dirname(fullname)
      # dirname() of a filesystem root is itself, so this terminates.
      while (root_dir != os.path.dirname(root_dir) and
             not _has_vcs_marker(root_dir)):
        root_dir = os.path.dirname(root_dir)
      if _has_vcs_marker(root_dir):
        prefix = os.path.commonprefix([root_dir, project_dir])
        return fullname[len(prefix) + 1:]

    # Don't know what to do; header guard warnings may be wrong...
    return fullname

  def Split(self):
    """Splits the file into the directory, basename, and extension.

    For 'chrome/browser/browser.cc', Split() would
    return ('chrome/browser', 'browser', '.cc')

    Returns:
      A tuple of (directory, basename, extension).
    """
    googlename = self.RepositoryName()
    project, rest = os.path.split(googlename)
    basename, extension = os.path.splitext(rest)
    return (project, basename, extension)

  def BaseName(self):
    """File base name - text after the final slash, before the final period."""
    return self.Split()[1]

  def Extension(self):
    """File extension - text following the final period."""
    return self.Split()[2]

  def NoExtension(self):
    """File has no source file extension."""
    return '/'.join(self.Split()[0:2])

  def IsSource(self):
    """File has a source file extension."""
    return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
def _ShouldPrintError(category, confidence, linenum):
  """If confidence >= verbose, category passes filter and is not suppressed."""
  # There are three ways we might decide not to print an error message:
  # a "NOLINT(category)" comment appears in the source,
  # the verbosity level isn't high enough, or the filters filter it out.
  if IsErrorSuppressedByNolint(category, linenum):
    return False
  if confidence < _cpplint_state.verbose_level:
    return False

  # Filters are applied in order; later filters can override earlier ones.
  is_filtered = False
  for one_filter in _Filters():
    prefix = one_filter[1:]
    if one_filter.startswith('-'):
      is_filtered = is_filtered or category.startswith(prefix)
    elif one_filter.startswith('+'):
      is_filtered = is_filtered and not category.startswith(prefix)
    else:
      assert False  # should have been checked for in SetFilter.
  return not is_filtered
def Error(filename, linenum, category, confidence, message):
  """Logs the fact we've found a lint error.

  We log where the error was found, and also our confidence in the error,
  that is, how certain we are this is a legitimate style regression, and
  not a misidentification or a use that's sometimes justified.

  False positives can be suppressed by the use of
  "cpplint(category)" comments on the offending line. These are
  parsed into _error_suppressions.

  Args:
    filename: The name of the file containing the error.
    linenum: The number of the line containing the error.
    category: A string used to describe the "category" this bug
      falls under: "whitespace", say, or "runtime".  Categories
      may have a hierarchy separated by slashes: "whitespace/indent".
    confidence: A number from 1-5 representing a confidence score for
      the error, with 5 meaning that we are certain of the problem,
      and 1 meaning that it could be a legitimate construct.
    message: The error message.
  """
  if not _ShouldPrintError(category, confidence, linenum):
    return
  _cpplint_state.IncrementErrorCount(category)
  if _cpplint_state.output_format == 'vs7':
    template = '%s(%s): %s [%s] [%d]\n'
  else:
    template = '%s:%s: %s [%s] [%d]\n'
  sys.stderr.write(template % (filename, linenum, message, category,
                               confidence))
# Matches standard C++ escape esequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Matches strings. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"')
# Matches characters. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'")
# Matches multi-line C++ comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of space removals tools so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side,
# if this doesn't work we try on left side but only if there's a non-character
# on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
r"""(\s*/\*.*\*/\s*$|
/\*.*\*/\s+|
\s+/\*.*\*/(?=\W)|
/\*.*\*/)""", re.VERBOSE)
def IsCppString(line):
  """Does line terminate so, that the next symbol is in string constant.

  This function does not consider single-line nor multi-line comments.

  Args:
    line: is a partial line of code starting from the 0..n.

  Returns:
    True, if next character appended to 'line' is inside a
    string constant.
  """
  # Neutralize escaped backslashes so that \\" is not mistaken for \".
  sanitized = line.replace(r'\\', 'XX')
  # Quotes that are escaped or are themselves character literals ('"')
  # do not open or close a string; an odd count of the rest means we are
  # inside an unterminated string constant.
  open_quotes = (sanitized.count('"') - sanitized.count(r'\"')
                 - sanitized.count("'\"'"))
  return (open_quotes & 1) == 1
def FindNextMultiLineCommentStart(lines, lineix):
  """Find the beginning marker for a multiline comment."""
  total = len(lines)
  while lineix < total:
    stripped = lines[lineix].strip()
    # Only return this marker if the comment goes beyond this line.
    if stripped.startswith('/*') and stripped.find('*/', 2) < 0:
      return lineix
    lineix += 1
  return total
def FindNextMultiLineCommentEnd(lines, lineix):
  """We are inside a comment, find the end marker."""
  for ix in range(lineix, len(lines)):
    if lines[ix].strip().endswith('*/'):
      return ix
  return len(lines)
def RemoveMultiLineCommentsFromRange(lines, begin, end):
  """Clears a range of lines for multi-line comments."""
  # Having // dummy comments makes the lines non-empty, so we will not get
  # unnecessary blank line warnings later in the code.
  ix = begin
  while ix < end:
    lines[ix] = '// dummy'
    ix += 1
def RemoveMultiLineComments(filename, lines, error):
  """Removes multiline (c-style) comments from lines."""
  # The replacement keeps the line count constant, so len(lines) is stable.
  total = len(lines)
  scan_pos = 0
  while scan_pos < total:
    comment_begin = FindNextMultiLineCommentStart(lines, scan_pos)
    if comment_begin >= total:
      return
    comment_end = FindNextMultiLineCommentEnd(lines, comment_begin)
    if comment_end >= total:
      error(filename, comment_begin + 1, 'readability/multiline_comment', 5,
            'Could not find end of multi-line comment')
      return
    RemoveMultiLineCommentsFromRange(lines, comment_begin, comment_end + 1)
    scan_pos = comment_end + 1
def CleanseComments(line):
  """Removes //-comments and single-line C-style /* */ comments.

  Args:
    line: A line of C++ source.

  Returns:
    The line with single-line comments removed.
  """
  slash_pos = line.find('//')
  # Only treat '//' as a comment when it is not inside a string constant.
  if slash_pos != -1 and not IsCppString(line[:slash_pos]):
    line = line[:slash_pos].rstrip()
  # get rid of /* ... */
  return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
class CleansedLines(object):
  """Holds 3 copies of all lines with different preprocessing applied to them.

  1) elided member contains lines without strings and comments,
  2) lines member contains lines without comments, and
  3) raw member contains all the lines without processing.
  All these three members are of <type 'list'>, and of the same length.
  """

  def __init__(self, lines):
    self.raw_lines = lines
    self.num_lines = len(lines)
    self.lines = []
    self.elided = []
    for raw_line in lines:
      self.lines.append(CleanseComments(raw_line))
      collapsed = self._CollapseStrings(raw_line)
      self.elided.append(CleanseComments(collapsed))

  def NumLines(self):
    """Returns the number of lines represented."""
    return self.num_lines

  @staticmethod
  def _CollapseStrings(elided):
    """Collapses strings and chars on a line to simple "" or '' blocks.

    We nix strings first so we're not fooled by text like '"http://"'

    Args:
      elided: The line being processed.

    Returns:
      The line with collapsed strings.
    """
    if _RE_PATTERN_INCLUDE.match(elided):
      # #include lines legitimately contain quoted paths; leave them alone.
      return elided
    # Remove escaped characters first to make quote/single quote collapsing
    # basic. Things that look like escaped characters shouldn't occur
    # outside of strings and chars.
    collapsed = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
    collapsed = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", collapsed)
    return _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', collapsed)
def CloseExpression(clean_lines, linenum, pos):
  """If input points to ( or { or [, finds the position that closes it.

  If lines[linenum][pos] points to a '(' or '{' or '[', finds the
  linenum/pos that correspond to the closing of the expression.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    pos: A position on the line.

  Returns:
    A tuple (line, linenum, pos) pointer *past* the closing brace, or
    (line, len(lines), -1) if we never find a close.  Note we ignore
    strings and comments when matching; and the line we return is the
    'cleansed' line at linenum.
  """
  line = clean_lines.elided[linenum]
  startchar = line[pos]
  if startchar not in '({[':
    return (line, clean_lines.NumLines(), -1)
  # Map each opening character to its closing counterpart.
  endchar = {'(': ')', '[': ']', '{': '}'}[startchar]

  # Walk forward line by line until the running open/close balance is even.
  num_open = line.count(startchar) - line.count(endchar)
  while num_open > 0:
    linenum += 1
    if linenum >= clean_lines.NumLines():
      # BUG FIX: the original indexed elided[NumLines()] here, raising
      # IndexError when the expression is never closed; return the
      # documented "no close found" tuple instead.
      return (line, clean_lines.NumLines(), -1)
    line = clean_lines.elided[linenum]
    num_open += line.count(startchar) - line.count(endchar)

  # OK, now find the endchar that actually got us back to even.
  # BUG FIX: the original always searched for ')' here, returning the wrong
  # position for '[' and '{' expressions; search for the matching endchar.
  endpos = len(line)
  while num_open >= 0:
    endpos = line.rfind(endchar, 0, endpos)
    num_open -= 1                 # chopped off another closer
  return (line, linenum, endpos + 1)
def CheckForCopyright(filename, lines, error):
  """Logs an error if no Copyright message appears at the top of the file.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  # We'll say it should occur by line 10. Don't forget there's a
  # dummy line at the front.
  # PY3 COMPAT FIX: xrange() does not exist on Python 3; range() is
  # equivalent here (the sequence has at most ten elements).
  for line in range(1, min(len(lines), 11)):
    if re.search(r'Copyright', lines[line], re.I):
      break
  else:                       # means no copyright line was found
    error(filename, 0, 'legal/copyright', 5,
          'No copyright message found. '
          'You should have a line: "Copyright [year] <Copyright Owner>"')
def GetHeaderGuardCPPVariable(filename):
  """Returns the CPP variable that should be used as a header guard.

  Args:
    filename: The name of a C++ header file.

  Returns:
    The CPP variable that should be used as a header guard in the
    named file.
  """
  # Emacs's flymake lints a copy named foo_flymake.h; restore the original
  # name so the computed guard matches the real file.
  filename = re.sub(r'_flymake\.h$', '.h', filename)

  fileinfo = FileInfo(filename)
  guard = re.sub(r'[-./\s]', '_', fileinfo.RepositoryName())
  return guard.upper() + '_'
def CheckForHeaderGuard(filename, lines, error):
  """Checks that the file contains a header guard.
  Logs an error if no #ifndef header guard is present. For other
  headers, checks that the full pathname is used.
  Args:
    filename: The name of the C++ header file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  # cppvar is the expected guard name derived from the repository path.
  cppvar = GetHeaderGuardCPPVariable(filename)
  ifndef = None
  ifndef_linenum = 0
  define = None
  endif = None
  endif_linenum = 0
  for linenum, line in enumerate(lines):
    linesplit = line.split()
    if len(linesplit) >= 2:
      # find the first occurrence of #ifndef and #define, save arg
      if not ifndef and linesplit[0] == '#ifndef':
        # set ifndef to the header guard presented on the #ifndef line.
        ifndef = linesplit[1]
        ifndef_linenum = linenum
      if not define and linesplit[0] == '#define':
        define = linesplit[1]
      # find the last occurrence of #endif, save entire line
      if line.startswith('#endif'):
        endif = line
        endif_linenum = linenum
  # Report missing pieces before style problems; each is fatal to the check.
  if not ifndef:
    error(filename, 0, 'build/header_guard', 5,
          'No #ifndef header guard found, suggested CPP variable is: %s' %
          cppvar)
    return
  if not define:
    error(filename, 0, 'build/header_guard', 5,
          'No #define header guard found, suggested CPP variable is: %s' %
          cppvar)
    return
  # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
  # for backward compatibility.
  if ifndef != cppvar:
    # The legacy double-underscore form is accepted at severity 0.
    error_level = 0
    if ifndef != cppvar + '_':
      error_level = 5
    ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum,
                            error)
    error(filename, ifndef_linenum, 'build/header_guard', error_level,
          '#ifndef header guard has wrong style, please use: %s' % cppvar)
  if define != ifndef:
    error(filename, 0, 'build/header_guard', 5,
          '#ifndef and #define don\'t match, suggested CPP variable is: %s' %
          cppvar)
    return
  # The #endif comment gets the same legacy-suffix leniency as the #ifndef.
  if endif != ('#endif // %s' % cppvar):
    error_level = 0
    if endif != ('#endif // %s' % (cppvar + '_')):
      error_level = 5
    ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum,
                            error)
    error(filename, endif_linenum, 'build/header_guard', error_level,
          '#endif line should be "#endif // %s"' % cppvar)
def CheckForUnicodeReplacementCharacters(filename, lines, error):
  """Logs an error for each line containing Unicode replacement characters.

  These indicate that either the file contained invalid UTF-8 (likely)
  or Unicode replacement characters (which it shouldn't).  Note that
  it's possible for this to throw off line numbering if the invalid
  UTF-8 occurred adjacent to a newline.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  replacement_char = u'\ufffd'
  for linenum, line in enumerate(lines):
    if replacement_char not in line:
      continue
    error(filename, linenum, 'readability/utf8', 5,
          'Line contains invalid UTF-8 (or Unicode replacement character).')
def CheckForNewlineAtEOF(filename, lines, error):
  """Logs an error if there is no newline char at the end of the file.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  # The array lines() was created by adding two newlines to the
  # original file (go figure), then splitting on \n.
  # To verify that the file ends in \n, we just have to make sure the
  # last-but-two element of lines() exists and is empty.
  ends_with_newline = len(lines) >= 3 and not lines[-2]
  if not ends_with_newline:
    error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
          'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
  """Logs an error if we see /* ... */ or "..." that extend past one line.

  /* ... */ comments are legit inside macros, for one line.
  Otherwise, we prefer // comments, so it's ok to warn about the
  other.  Likewise, it's ok for strings to extend across multiple
  lines, as long as a line continuation character (backslash)
  terminates each line. Although not currently prohibited by the C++
  style guide, it's ugly and unnecessary. We don't do well with either
  in this lint program, so we warn about both.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # Remove all \\ (escaped backslashes) from the line. They are OK, and the
  # second (escaped) slash may trigger later \" detection erroneously.
  line = line.replace('\\\\', '')

  if line.count('/*') > line.count('*/'):
    error(filename, linenum, 'readability/multiline_comment', 5,
          'Complex multi-line /*...*/-style comment found. '
          'Lint may give bogus warnings. '
          'Consider replacing these with //-style comments, '
          'with #if 0...#endif, '
          'or with more clearly structured multi-line comments.')

  unescaped_quotes = line.count('"') - line.count('\\"')
  if unescaped_quotes % 2 == 1:
    error(filename, linenum, 'readability/multiline_string', 5,
          'Multi-line string ("...") found. This lint script doesn\'t '
          'do well with such strings, and may give bogus warnings. They\'re '
          'ugly and unnecessary, and you should use concatenation instead".')
# Pairs of (thread-unsafe function, its _r reentrant replacement).
threading_list = (
    ('asctime(', 'asctime_r('),
    ('ctime(', 'ctime_r('),
    ('getgrgid(', 'getgrgid_r('),
    ('getgrnam(', 'getgrnam_r('),
    ('getlogin(', 'getlogin_r('),
    ('getpwnam(', 'getpwnam_r('),
    ('getpwuid(', 'getpwuid_r('),
    ('gmtime(', 'gmtime_r('),
    ('localtime(', 'localtime_r('),
    ('rand(', 'rand_r('),
    ('readdir(', 'readdir_r('),
    ('strtok(', 'strtok_r('),
    ('ttyname(', 'ttyname_r('),
    )


def CheckPosixThreading(filename, clean_lines, linenum, error):
  """Checks for calls to thread-unsafe functions.

  Much code has been originally written without consideration of
  multi-threading. Also, engineers are relying on their old experience;
  they have learned posix before threading extensions were added. These
  tests guide the engineers to use thread-safe functions (when using
  posix directly).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  for single_thread_function, multithread_safe_function in threading_list:
    ix = line.find(single_thread_function)
    if ix < 0:
      continue
    # Require the match to start a token: at column 0, or after a character
    # that cannot belong to an identifier or a member access ('_', '.', '>').
    # Comparisons made explicit for clarity -- pylint: disable-msg=C6403
    if ix > 0 and (line[ix - 1].isalnum() or
                   line[ix - 1] in ('_', '.', '>')):
      continue
    error(filename, linenum, 'runtime/threadsafe_fn', 2,
          'Consider using ' + multithread_safe_function +
          '...) instead of ' + single_thread_function +
          '...) for improved thread safety.')
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
    r'^\s*\*\w+(\+\+|--);')


def CheckInvalidIncrement(filename, clean_lines, linenum, error):
  """Checks for invalid increment *count++.

  For example following function:
  void increment_counter(int* count) {
    *count++;
  }
  is invalid, because it effectively does count++, moving pointer, and should
  be replaced with ++*count, (*count)++ or *count += 1.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  if not _RE_PATTERN_INVALID_INCREMENT.match(line):
    return
  error(filename, linenum, 'runtime/invalid_increment', 5,
        'Changing pointer instead of value (or unused value of operator*).')
class _ClassInfo(object):
"""Stores information about a class."""
def __init__(self, name, clean_lines, linenum):
self.name = name
self.linenum = linenum
self.seen_open_brace = False
self.is_derived = False
self.virtual_method_linenumber = None
self.has_virtual_destructor = False
self.brace_depth = 0
# Try to find the end of the class. This will be confused by things like:
# class A {
# } *x = { ...
#
# But it's still good enough for CheckSectionSpacing.
self.last_line = 0
depth = 0
for i in range(linenum, clean_lines.NumLines()):
line = clean_lines.lines[i]
depth += line.count('{') - line.count('}')
if not depth:
self.last_line = i
break
class _ClassState(object):
"""Holds the current state of the parse relating to class declarations.
It maintains a stack of _ClassInfos representing the parser's guess
as to the current nesting of class declarations. The innermost class
is at the top (back) of the stack. Typically, the stack will either
be empty or have exactly one entry.
"""
def __init__(self):
self.classinfo_stack = []
def CheckFinished(self, filename, error):
"""Checks that all classes have been completely parsed.
Call this when all lines in a file have been processed.
Args:
filename: The name of the current file.
error: The function to call with any errors found.
"""
if self.classinfo_stack:
# Note: This test can result in false positives if #ifdef constructs
# get in the way of brace matching. See the testBuildClass test in
# cpplint_unittest.py for an example of this.
error(filename, self.classinfo_stack[0].linenum, 'build/class', 5,
'Failed to find complete declaration of class %s' %
self.classinfo_stack[0].name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
                                  class_state, error):
  """Logs an error if we see certain non-ANSI constructs ignored by gcc-2.

  Complain about several constructs which gcc-2 accepts, but which are
  not standard C++.  Warning about these in lint is one way to ease the
  transition to new compilers.
  - put storage class first (e.g. "static const" instead of "const static").
  - "%lld" instead of %qd" in printf-type functions.
  - "%1$d" is non-standard in printf-type functions.
  - "\%" is an undefined character escape sequence.
  - text after #endif is not allowed.
  - invalid inner-style forward declaration.
  - >? and <? operators, and their >?= and <?= cousins.
  - classes with virtual methods need virtual destructors (compiler warning
    available, but not turned on yet.)

  Additionally, check for constructor/destructor style violations and
  reference members, as it is very convenient to do so while checking for
  gcc-2 compliance.  This function also updates class_state as a side effect.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    class_state: A _ClassState instance which maintains information about
                 the current stack of nested class declarations being parsed.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
  """
  # Remove comments from the line, but leave in strings for now.
  line = clean_lines.lines[linenum]

  if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
    error(filename, linenum, 'runtime/printf_format', 3,
          '%q in format strings is deprecated.  Use %ll instead.')

  if Search(r'printf\s*\(.*".*%\d+\$', line):
    error(filename, linenum, 'runtime/printf_format', 2,
          '%N$ formats are unconventional.  Try rewriting to avoid them.')

  # Remove escaped backslashes before looking for undefined escapes.
  line = line.replace('\\\\', '')

  if Search(r'("|\').*\\(%|\[|\(|{)', line):
    error(filename, linenum, 'build/printf_format', 3,
          '%, [, (, and { are undefined character escapes.  Unescape them.')

  # For the rest, work with both comments and strings removed.
  line = clean_lines.elided[linenum]

  if Search(r'\b(const|volatile|void|char|short|int|long'
            r'|float|double|signed|unsigned'
            r'|schar|u?int8|u?int16|u?int32|u?int64)'
            r'\s+(auto|register|static|extern|typedef)\b',
            line):
    error(filename, linenum, 'build/storage_class', 5,
          'Storage class (static, extern, typedef, etc) should be first.')

  if Match(r'\s*#\s*endif\s*[^/\s]+', line):
    error(filename, linenum, 'build/endif_comment', 5,
          'Uncommented text after #endif is non-standard.  Use a comment.')

  if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
    error(filename, linenum, 'build/forward_decl', 5,
          'Inner-style forward declarations are invalid.  Remove this line.')

  if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
            line):
    error(filename, linenum, 'build/deprecated', 3,
          '>? and <? (max and min) operators are non-standard and deprecated.')

  if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
    # TODO(unknown): Could it be expanded safely to arbitrary references,
    # without triggering too many false positives? The first
    # attempt triggered 5 warnings for mostly benign code in the regtest, hence
    # the restriction.
    # Here's the original regexp, for the reference:
    # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
    # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
    error(filename, linenum, 'runtime/member_string_references', 2,
          'const string& members are dangerous. It is much better to use '
          'alternatives, such as pointers or simple constants.')

  # Track class entry and exit, and attempt to find cases within the
  # class declaration that don't meet the C++ style
  # guidelines.  Tracking is very dependent on the code matching Google
  # style guidelines, but it seems to perform well enough in testing
  # to be a worthwhile addition to the checks.
  classinfo_stack = class_state.classinfo_stack
  # Look for a class declaration.  The regexp accounts for decorated classes
  # such as in:
  # class LOCKABLE API Object {
  # };
  class_decl_match = Match(
      r'\s*(template\s*<[\w\s<>,:]*>\s*)?'
      '(class|struct)\s+([A-Z_]+\s+)*(\w+(::\w+)*)', line)
  if class_decl_match:
    classinfo_stack.append(_ClassInfo(
        class_decl_match.group(4), clean_lines, linenum))

  # Everything else in this function uses the top of the stack if it's
  # not empty.
  if not classinfo_stack:
    return

  classinfo = classinfo_stack[-1]

  # If the opening brace hasn't been seen look for it and also
  # parent class declarations.
  if not classinfo.seen_open_brace:
    # If the line has a ';' in it, assume it's a forward declaration or
    # a single-line class declaration, which we won't process.
    if line.find(';') != -1:
      classinfo_stack.pop()
      return

    classinfo.seen_open_brace = (line.find('{') != -1)
    # Look for a bare ':'
    if Search('(^|[^:]):($|[^:])', line):
      classinfo.is_derived = True
    if not classinfo.seen_open_brace:
      return  # Everything else in this function is for after open brace

  # The class may have been declared with namespace or classname qualifiers.
  # The constructor and destructor will not have those qualifiers.
  base_classname = classinfo.name.split('::')[-1]

  # Look for single-argument constructors that aren't marked explicit.
  # Technically a valid construct, but against style.
  args = Match(r'\s+(?:inline\s+)?%s\s*\(([^,()]+)\)'
               % re.escape(base_classname),
               line)
  if (args and
      args.group(1) != 'void' and
      not Match(r'(const\s+)?%s\s*(?:<\w+>\s*)?&' % re.escape(base_classname),
                args.group(1).strip())):
    error(filename, linenum, 'runtime/explicit', 5,
          'Single-argument constructors should be marked explicit.')

  # Look for methods declared virtual.
  if Search(r'\bvirtual\b', line):
    classinfo.virtual_method_linenumber = linenum
    # Only look for a destructor declaration on the same line.  It would
    # be extremely unlikely for the destructor declaration to occupy
    # more than one line.
    if Search(r'~%s\s*\(' % base_classname, line):
      classinfo.has_virtual_destructor = True

  # Look for class end.
  brace_depth = classinfo.brace_depth
  brace_depth = brace_depth + line.count('{') - line.count('}')
  if brace_depth <= 0:
    classinfo = classinfo_stack.pop()
    # Try to detect missing virtual destructor declarations.
    # For now, only warn if a non-derived class with virtual methods lacks
    # a virtual destructor. This is to make it less likely that people will
    # declare derived virtual destructors without declaring the base
    # destructor virtual.
    if ((classinfo.virtual_method_linenumber is not None) and
        (not classinfo.has_virtual_destructor) and
        (not classinfo.is_derived)):  # Only warn for base classes
      error(filename, classinfo.linenum, 'runtime/virtual', 4,
            'The class %s probably needs a virtual destructor due to '
            'having virtual method(s), one declared at line %d.'
            % (classinfo.name, classinfo.virtual_method_linenumber))
  else:
    classinfo.brace_depth = brace_depth
def CheckSpacingForFunctionCall(filename, line, linenum, error):
  """Checks for the correctness of various spacing around function calls.

  Args:
    filename: The name of the current file.
    line: The text of the line to check.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Function calls often occur inside if/for/while/switch expressions,
  # which have their own, more liberal conventions.  When the line holds
  # such a construct we restrict the stricter checks to the text inside
  # its parentheses.
  fncall = line  # if there's no control flow construct, look at whole line
  control_patterns = (r'\bif\s*\((.*)\)\s*{',
                      r'\bfor\s*\((.*)\)\s*{',
                      r'\bwhile\s*\((.*)\)\s*[{;]',
                      r'\bswitch\s*\((.*)\)\s*{')
  for control_pattern in control_patterns:
    control_match = Search(control_pattern, line)
    if control_match:
      fncall = control_match.group(1)  # look inside the parens for fn calls
      break

  # Except in if/for/while/switch, there should never be space
  # immediately inside parens (eg "f( 3, 4 )").  We make an exception
  # for nested parens ( (a+b) + c ).  Likewise, there should never be
  # a space before a ( when it's a function argument.  A char is assumed
  # to start a function name when it is legal in an identifier
  # (alnum + _) and we're not starting a macro.  Pointers and references
  # to arrays and functions are too tricky and are skipped via three
  # simple shapes:
  #   " (something)(maybe-something)" or
  #   " (something)(maybe-something," or
  #   " (something)[something]"
  # Note that we assume the contents of [] to be short enough that
  # they'll never need to wrap.
  looks_like_call = (
      # Ignore control structures.
      not Search(r'\b(if|for|while|switch|return|delete)\b', fncall) and
      # Ignore pointers/references to functions.
      not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
      # Ignore pointers/references to arrays.
      not Search(r' \([^)]+\)\[[^\]]+\]', fncall))
  if looks_like_call:
    if Search(r'\w\s*\(\s(?!\s*\\$)', fncall):  # a ( used for a fn call
      error(filename, linenum, 'whitespace/parens', 4,
            'Extra space after ( in function call')
    elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
      error(filename, linenum, 'whitespace/parens', 2,
            'Extra space after (')
    if (Search(r'\w\s+\(', fncall) and
        not Search(r'#\s*define|typedef', fncall)):
      error(filename, linenum, 'whitespace/parens', 4,
            'Extra space before ( in function call')
    # If the ) is followed only by a newline or a { + newline, assume it's
    # part of a control statement (if/while/etc), and don't complain
    if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
      # If the closing parenthesis is preceded by only whitespaces,
      # try to give a more descriptive error message.
      if Search(r'^\s+\)', fncall):
        error(filename, linenum, 'whitespace/parens', 2,
              'Closing ) should be moved to the previous line')
      else:
        error(filename, linenum, 'whitespace/parens', 2,
              'Extra space before )')
def IsBlankLine(line):
  """Returns true if the given line is blank.

  We consider a line to be blank if the line is empty or consists of
  only white spaces.

  Args:
    line: A line of a string.

  Returns:
    True, if the given line is blank.
  """
  if not line:
    return True
  return line.isspace()
def CheckForFunctionLengths(filename, clean_lines, linenum,
                            function_state, error):
  """Reports for long function bodies.

  For an overview why this is done, see:
  http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions

  Uses a simplistic algorithm assuming other style guidelines
  (especially spacing) are followed.
  Only checks unindented functions, so class members are unchecked.
  Trivial bodies are unchecked, so constructors with huge initializer lists
  may be missed.
  Blank/comment lines are not counted so as to avoid encouraging the removal
  of vertical space and comments just to get through a lint check.
  NOLINT *on the last line of a function* disables this check.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    function_state: Current function name and lines in body so far.
    error: The function to call with any errors found.
  """
  lines = clean_lines.lines
  line = lines[linenum]
  raw = clean_lines.raw_lines
  # NOTE(review): raw_line is unused in this block -- presumably intended
  # for the NOLINT handling mentioned above; confirm against the rest of
  # the file before removing.
  raw_line = raw[linenum]
  joined_line = ''

  starting_func = False
  regexp = r'(\w(\w|::|\*|\&|\s)*)\('  # decls * & space::name( ...
  match_result = Match(regexp, line)
  if match_result:
    # If the name is all caps and underscores, figure it's a macro and
    # ignore it, unless it's TEST or TEST_F.
    function_name = match_result.group(1).split()[-1]
    if function_name == 'TEST' or function_name == 'TEST_F' or (
        not Match(r'[A-Z_]+$', function_name)):
      starting_func = True

  if starting_func:
    body_found = False
    # Scan forward for the opening brace (or evidence there is no body).
    # xrange: this file targets Python 2.
    for start_linenum in xrange(linenum, clean_lines.NumLines()):
      start_line = lines[start_linenum]
      joined_line += ' ' + start_line.lstrip()
      if Search(r'(;|})', start_line):  # Declarations and trivial functions
        body_found = True
        break                           # ... ignore
      elif Search(r'{', start_line):
        body_found = True
        function = Search(r'((\w|:)*)\(', line).group(1)
        if Match(r'TEST', function):    # Handle TEST... macros
          parameter_regexp = Search(r'(\(.*\))', joined_line)
          if parameter_regexp:          # Ignore bad syntax
            function += parameter_regexp.group(1)
        else:
          function += '()'
        function_state.Begin(function)
        break
    if not body_found:
      # No body for the function (or evidence of a non-function) was found.
      error(filename, linenum, 'readability/fn_size', 5,
            'Lint failed to find start of function body.')
  elif Match(r'^\}\s*$', line):  # function end
    function_state.Check(error, filename, linenum)
    function_state.End()
  elif not Match(r'^\s*$', line):
    function_state.Count()  # Count non-blank/non-comment lines.
# Matches "// TODO(user): note" style comments: captures the whitespace
# before TODO, the optional "(user)" part, and the char after the colon.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')


def CheckComment(comment, filename, linenum, error):
  """Checks for common mistakes in TODO comments.

  Args:
    comment: The text of the comment from the line in question.
    filename: The name of the current file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  match = _RE_PATTERN_TODO.match(comment)
  if not match:
    return

  leading_whitespace, username, middle_whitespace = match.group(1, 2, 3)

  # One whitespace is correct; zero whitespace is handled elsewhere.
  if len(leading_whitespace) > 1:
    error(filename, linenum, 'whitespace/todo', 2,
          'Too many spaces before TODO')

  if not username:
    error(filename, linenum, 'readability/todo', 2,
          'Missing username in TODO; it should look like '
          '"// TODO(my_username): Stuff."')

  # Comparisons made explicit for correctness -- pylint: disable-msg=C6403
  if middle_whitespace != ' ' and middle_whitespace != '':
    error(filename, linenum, 'whitespace/todo', 2,
          'TODO(my_username) should be followed by a space')
def CheckSpacing(filename, clean_lines, linenum, error):
  """Checks for the correctness of various spacing issues in the code.

  Things we check for: spaces around operators, spaces after
  if/for/while/switch, no spaces around parens in function calls, two
  spaces between code and comment, don't start a block with a blank
  line, don't end a function with a blank line, don't add a blank line
  after public/protected/private, don't have too many blank lines in a row.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  raw = clean_lines.raw_lines
  line = raw[linenum]

  # Before nixing comments, check if the line is blank for no good
  # reason.  This includes the first line after a block is opened, and
  # blank lines at the end of a function (ie, right before a line like '}'
  if IsBlankLine(line):
    elided = clean_lines.elided
    prev_line = elided[linenum - 1]
    prevbrace = prev_line.rfind('{')
    # TODO(unknown): Don't complain if line before blank line, and line after,
    #                both start with alnums and are indented the same amount.
    #                This ignores whitespace at the start of a namespace block
    #                because those are not usually indented.
    if (prevbrace != -1 and prev_line[prevbrace:].find('}') == -1
        and prev_line[:prevbrace].find('namespace') == -1):
      # OK, we have a blank line at the start of a code block.  Before we
      # complain, we check if it is an exception to the rule: The previous
      # non-empty line has the parameters of a function header that are
      # indented 4 spaces (because they did not fit in a 80 column line
      # when placed on the same line as the function name).  We also check
      # for the case where the previous line is indented 6 spaces, which
      # may happen when the initializers of a constructor do not fit into
      # a 80 column line.
      exception = False
      if Match(r' {6}\w', prev_line):  # Initializer list?
        # We are looking for the opening column of initializer list, which
        # should be indented 4 spaces to cause 6 space indentation afterwards.
        search_position = linenum-2
        while (search_position >= 0
               and Match(r' {6}\w', elided[search_position])):
          search_position -= 1
        # NOTE(review): the '    :' literal below was reconstructed as four
        # spaces + colon (compared against a 5-char slice); the pasted
        # source had its space runs collapsed -- confirm against upstream.
        exception = (search_position >= 0
                     and elided[search_position][:5] == '    :')
      else:
        # Search for the function arguments or an initializer list.  We use a
        # simple heuristic here: If the line is indented 4 spaces; and we
        # have a closing paren, without the opening paren, followed by an
        # opening brace or colon (for initializer lists) we assume that it
        # is the last line of a function header.  If we have a colon
        # indented 4 spaces, it is an initializer list.
        exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
                           prev_line)
                     or Match(r' {4}:', prev_line))

      if not exception:
        error(filename, linenum, 'whitespace/blank_line', 2,
              'Blank line at the start of a code block. Is this needed?')

    # This doesn't ignore whitespace at the end of a namespace block
    # because that is too hard without pairing open/close braces;
    # however, a special exception is made for namespace closing
    # brackets which have a comment containing "namespace".
    #
    # Also, ignore blank lines at the end of a block in a long if-else
    # chain, like this:
    #   if (condition1) {
    #     // Something followed by a blank line
    #
    #   } else if (condition2) {
    #     // Something else
    #   }
    if linenum + 1 < clean_lines.NumLines():
      next_line = raw[linenum + 1]
      if (next_line
          and Match(r'\s*}', next_line)
          and next_line.find('namespace') == -1
          and next_line.find('} else ') == -1):
        error(filename, linenum, 'whitespace/blank_line', 3,
              'Blank line at the end of a code block. Is this needed?')

    matched = Match(r'\s*(public|protected|private):', prev_line)
    if matched:
      error(filename, linenum, 'whitespace/blank_line', 3,
            'Do not leave a blank line after "%s:"' % matched.group(1))

  # Next, we complain if there's a comment too near the text
  commentpos = line.find('//')
  if commentpos != -1:
    # Check if the // may be in quotes.  If so, ignore it
    # Comparisons made explicit for clarity -- pylint: disable-msg=C6403
    if (line.count('"', 0, commentpos) -
        line.count('\\"', 0, commentpos)) % 2 == 0:   # not in quotes
      # Allow one space for new scopes, two spaces otherwise:
      if (not Match(r'^\s*{ //', line) and
          ((commentpos >= 1 and
            line[commentpos-1] not in string.whitespace) or
           (commentpos >= 2 and
            line[commentpos-2] not in string.whitespace))):
        error(filename, linenum, 'whitespace/comments', 2,
              'At least two spaces is best between code and comments')
      # There should always be a space between the // and the comment
      commentend = commentpos + 2
      if commentend < len(line) and not line[commentend] == ' ':
        # but some lines are exceptions -- e.g. if they're big
        # comment delimiters like:
        # //----------------------------------------------------------
        # or are an empty C++ style Doxygen comment, like:
        # ///
        # or they begin with multiple slashes followed by a space:
        # //////// Header comment
        match = (Search(r'[=/-]{4,}\s*$', line[commentend:]) or
                 Search(r'^/$', line[commentend:]) or
                 Search(r'^/+ ', line[commentend:]))
        if not match:
          error(filename, linenum, 'whitespace/comments', 4,
                'Should have a space between // and comment')
      CheckComment(line[commentpos:], filename, linenum, error)

  line = clean_lines.elided[linenum]  # get rid of comments and strings

  # Don't try to do spacing checks for operator methods.
  # NOTE(review): the replacement string relies on Python 2 passing the
  # unknown escape '\(' through literally; re.sub raises "bad escape" on
  # it under Python >= 3.7 -- confirm the target interpreter.
  line = re.sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', 'operator\(', line)

  # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
  # Otherwise not.  Note we only check for non-spaces on *both* sides;
  # sometimes people put non-spaces on one side when aligning ='s among
  # many lines (not that this is behavior that I approve of...)
  if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
    error(filename, linenum, 'whitespace/operators', 4,
          'Missing spaces around =')

  # It's ok not to have spaces around binary operators like + - * /, but if
  # there's too little whitespace, we get concerned.  It's hard to tell,
  # though, so we punt on this one for now.  TODO.

  # You should always have whitespace around binary operators.
  # Alas, we can't test < or > because they're legitimately used sans spaces
  # (a->b, vector<int> a).  The only time we can tell is a < with no >, and
  # only if it's not template params list spilling into the next line.
  match = Search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line)
  if not match:
    # Note that while it seems that the '<[^<]*' term in the following
    # regexp could be simplified to '<.*', which would indeed match
    # the same class of strings, the [^<] means that searching for the
    # regexp takes linear rather than quadratic time.
    if not Search(r'<[^<]*,\s*$', line):  # template params spill
      match = Search(r'[^<>=!\s](<)[^<>=!\s]([^>]|->)*$', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around %s' % match.group(1))
  # We allow no-spaces around << and >> when used like this: 10<<20, but
  # not otherwise (particularly, not when used as streams)
  match = Search(r'[^0-9\s](<<|>>)[^0-9\s]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around %s' % match.group(1))

  # There shouldn't be space around unary operators
  match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 4,
          'Extra space for operator %s' % match.group(1))

  # A pet peeve of mine: no spaces after an if, while, switch, or for
  match = Search(r' (if\(|for\(|while\(|switch\()', line)
  if match:
    error(filename, linenum, 'whitespace/parens', 5,
          'Missing space before ( in %s' % match.group(1))

  # For if/for/while/switch, the left and right parens should be
  # consistent about how many spaces are inside the parens, and
  # there should either be zero or one spaces inside the parens.
  # We don't want: "if ( foo)" or "if ( foo )".
  # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
  match = Search(r'\b(if|for|while|switch)\s*'
                 r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
                 line)
  if match:
    if len(match.group(2)) != len(match.group(4)):
      if not (match.group(3) == ';' and
              len(match.group(2)) == 1 + len(match.group(4)) or
              not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
        error(filename, linenum, 'whitespace/parens', 5,
              'Mismatching spaces inside () in %s' % match.group(1))
    if not len(match.group(2)) in [0, 1]:
      error(filename, linenum, 'whitespace/parens', 5,
            'Should have zero or one spaces inside ( and ) in %s' %
            match.group(1))

  # You should always have a space after a comma (either as fn arg or operator)
  if Search(r',[^\s]', line):
    error(filename, linenum, 'whitespace/comma', 3,
          'Missing space after ,')

  # You should always have a space after a semicolon
  # except for few corner cases
  # TODO(unknown): clarify if 'if (1) { return 1;}' is requires one more
  # space after ;
  if Search(r';[^\s};\\)/]', line):
    error(filename, linenum, 'whitespace/semicolon', 3,
          'Missing space after ;')

  # Next we will look for issues with function calls.
  CheckSpacingForFunctionCall(filename, line, linenum, error)

  # Except after an opening paren, or after another opening brace (in case of
  # an initializer list, for instance), you should have spaces before your
  # braces. And since you should never have braces at the beginning of a line,
  # this is an easy test.
  if Search(r'[^ ({]{', line):
    error(filename, linenum, 'whitespace/braces', 5,
          'Missing space before {')

  # Make sure '} else {' has spaces.
  if Search(r'}else', line):
    error(filename, linenum, 'whitespace/braces', 5,
          'Missing space before else')

  # You shouldn't have spaces before your brackets, except maybe after
  # 'delete []' or 'new char * []'.
  if Search(r'\w\s+\[', line) and not Search(r'delete\s+\[', line):
    error(filename, linenum, 'whitespace/braces', 5,
          'Extra space before [')

  # You shouldn't have a space before a semicolon at the end of the line.
  # There's a special case for "for" since the style guide allows space before
  # the semicolon there.
  if Search(r':\s*;\s*$', line):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Semicolon defining empty statement. Use { } instead.')
  elif Search(r'^\s*;\s*$', line):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Line contains only semicolon. If this should be an empty statement, '
          'use { } instead.')
  elif (Search(r'\s+;\s*$', line) and
        not Search(r'\bfor\b', line)):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Extra space before last semicolon. If this should be an empty '
          'statement, use { } instead.')
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
  """Checks for additional blank line issues related to sections.

  Currently the only thing checked here is blank line before
  protected/private.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    class_info: A _ClassInfo objects.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Skip checks if the class is small, where small means 25 lines or less.
  # 25 lines seems like a good cutoff since that's the usual height of
  # terminals, and any class that can't fit in one screen can't really
  # be considered "small".
  #
  # Also skip checks if we are on the first line.  This accounts for
  # classes that look like
  #   class Foo { public: ... };
  #
  # If we didn't find the end of the class, last_line would be zero,
  # and the check will be skipped by the first condition.
  if (class_info.last_line - class_info.linenum <= 24 or
      linenum <= class_info.linenum):
    return

  matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
  if matched:
    # Issue warning if the line before public/protected/private was
    # not a blank line, but don't do this if the previous line contains
    # "class" or "struct".  This can happen two ways:
    #  - We are at the beginning of the class.
    #  - We are forward-declaring an inner class that is semantically
    #    private, but needed to be public for implementation reasons.
    prev_line = clean_lines.lines[linenum - 1]
    if (not IsBlankLine(prev_line) and
        not Search(r'\b(class|struct)\b', prev_line)):
      # Try a bit harder to find the beginning of the class.  This is to
      # account for multi-line base-specifier lists, e.g.:
      #   class Derived
      #       : public Base {
      end_class_head = class_info.linenum
      for i in range(class_info.linenum, linenum):
        if Search(r'\{\s*$', clean_lines.lines[i]):
          end_class_head = i
          break
      if end_class_head < linenum - 1:
        error(filename, linenum, 'whitespace/blank_line', 3,
              '"%s:" should be preceded by a blank line' % matched.group(1))
def GetPreviousNonBlankLine(clean_lines, linenum):
  """Return the most recent non-blank line and its line number.

  Args:
    clean_lines: A CleansedLines instance containing the file contents.
    linenum: The number of the line to check.

  Returns:
    A tuple with two elements.  The first element is the contents of the last
    non-blank line before the current line, or the empty string if this is
    the first non-blank line.  The second is the line number of that line, or
    -1 if this is the first non-blank line.
  """
  # Walk backwards from the line above linenum until something non-blank
  # turns up (or we run off the top of the file).
  index = linenum - 1
  while index >= 0:
    candidate = clean_lines.elided[index]
    if not IsBlankLine(candidate):      # if not a blank line...
      return (candidate, index)
    index -= 1
  return ('', -1)
def CheckBraces(filename, clean_lines, linenum, error):
  """Looks for misplaced braces (e.g. at the end of line).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]  # get rid of comments and strings

  if Match(r'\s*{\s*$', line):
    # We allow an open brace to start a line in the case where someone
    # is using braces in a block to explicitly create a new scope,
    # which is commonly used to control the lifetime of
    # stack-allocated variables.  We don't detect this perfectly: we
    # just don't complain if the last non-whitespace character on the
    # previous non-blank line is ';', ':', '{', or '}'.
    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
    if not Search(r'[;:}{]\s*$', prevline):
      error(filename, linenum, 'whitespace/braces', 4,
            '{ should almost always be at the end of the previous line')

  # An else clause should be on the same line as the preceding closing brace.
  if Match(r'\s*else\s*', line):
    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
    if Match(r'\s*}\s*$', prevline):
      error(filename, linenum, 'whitespace/newline', 4,
            'An else should appear on the same line as the preceding }')

  # If braces come on one side of an else, they should be on both.
  # However, we have to worry about "else if" that spans multiple lines!
  if Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
    if Search(r'}\s*else if([^{]*)$', line):       # could be multi-line if
      # find the ( after the if
      pos = line.find('else if')
      pos = line.find('(', pos)
      if pos > 0:
        (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
        if endline[endpos:].find('{') == -1:    # must be brace after if
          error(filename, linenum, 'readability/braces', 5,
                'If an else has a brace on one side, it should have it on both')
    else:            # common case: else not followed by a multi-line if
      error(filename, linenum, 'readability/braces', 5,
            'If an else has a brace on one side, it should have it on both')

  # Likewise, an else should never have the else clause on the same line
  if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
    error(filename, linenum, 'whitespace/newline', 4,
          'Else clause should never be on same line as else (use 2 lines)')

  # In the same way, a do/while should never be on one line
  if Match(r'\s*do [^\s{]', line):
    error(filename, linenum, 'whitespace/newline', 4,
          'do/while clauses should not be on a single line')

  # Braces shouldn't be followed by a ; unless they're defining a struct
  # or initializing an array.
  # We can't tell in general, but we can for some common cases.
  prevlinenum = linenum
  while True:
    (prevline, prevlinenum) = GetPreviousNonBlankLine(clean_lines, prevlinenum)
    # Join a continued "{...};" onto its predecessor so the balanced-brace
    # test below sees the whole statement.
    if Match(r'\s+{.*}\s*;', line) and not prevline.count(';'):
      line = prevline + line
    else:
      break
  if (Search(r'{.*}\s*;', line) and
      line.count('{') == line.count('}') and
      not Search(r'struct|class|enum|\s*=\s*{', line)):
    error(filename, linenum, 'readability/braces', 4,
          "You don't need a ; after a }")
def ReplaceableCheck(operator, macro, line):
  """Determine whether a basic CHECK can be replaced with a more specific one.

  For example suggest using CHECK_EQ instead of CHECK(a == b) and
  similarly for CHECK_GE, CHECK_GT, CHECK_LE, CHECK_LT, CHECK_NE.

  Args:
    operator: The C++ operator used in the CHECK.
    macro: The CHECK or EXPECT macro being called.
    line: The current source line.

  Returns:
    True if the CHECK can be replaced with a more specific one.
  """
  # Matches decimal and hex integers, strings, and chars (in that order).
  literal = r'([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')'

  # Require something that looks like a literal on one side of the
  # operator, since CHECK(x == iterator) won't compile.  This means we
  # can't catch all the cases where a more specific CHECK is possible,
  # but it's less annoying than dealing with extraneous warnings.
  replaceable = (r'\s*' + macro + r'\((\s*' +
                 literal + r'\s*' + operator + r'[^<>].*|'
                 r'.*[^<>]' + operator + r'\s*' + literal +
                 r'\s*\))')

  # Don't complain about CHECK(x == NULL) or similar because
  # CHECK_EQ(x, NULL) won't compile (requires a cast).
  # Also, don't complain about more complex boolean expressions
  # involving && or || such as CHECK(a == b || c == d).
  if Search(r'NULL|&&|\|\|', line):
    return False
  return bool(Match(replaceable, line))
def CheckCheck(filename, clean_lines, linenum, error):
  """Checks the use of CHECK and EXPECT macros.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Decide the set of replacement macros that should be suggested
  raw_lines = clean_lines.raw_lines
  current_macro = ''
  for macro in _CHECK_MACROS:
    if raw_lines[linenum].find(macro) >= 0:
      current_macro = macro
      break
  if not current_macro:
    # Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT'
    return

  line = clean_lines.elided[linenum]  # get rid of comments and strings

  # Encourage replacing plain CHECKs with CHECK_EQ/CHECK_NE/etc.
  # Operator order matters: two-char operators must be tried before their
  # one-char prefixes ('>=' before '>', '<=' before '<').
  for operator in ['==', '!=', '>=', '>', '<=', '<']:
    if ReplaceableCheck(operator, current_macro, line):
      error(filename, linenum, 'readability/check', 2,
            'Consider using %s instead of %s(a %s b)' % (
                _CHECK_REPLACEMENT[current_macro][operator],
                current_macro, operator))
      break
def GetLineWidth(line):
  """Determines the width of the line in column positions.

  Args:
    line: A string, which may be a Unicode string.

  Returns:
    The width of the line in column positions, accounting for Unicode
    combining characters and wide characters.
  """
  # Plain byte strings are one column per character.
  if not isinstance(line, unicode):
    return len(line)
  # Wide/fullwidth East Asian characters occupy two columns; combining
  # marks occupy none; everything else occupies one.
  width = 0
  for char in unicodedata.normalize('NFC', line):
    if unicodedata.east_asian_width(char) in ('W', 'F'):
      width += 2
    elif not unicodedata.combining(char):
      width += 1
  return width
def CheckStyle(filename, clean_lines, linenum, file_extension, class_state,
               error):
  """Checks rules from the 'C++ style rules' section of cppguide.html.
  Most of these rules are hard to test (naming, comment style), but we
  do what we can. In particular we check for 2-space indents, line lengths,
  tab usage, spaces inside code, etc.
  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    file_extension: The extension (without the dot) of the filename.
    class_state: Object whose classinfo_stack tracks nested class scopes;
        consulted for the section-spacing check at the end.
    error: The function to call with any errors found.
  """
  # Whitespace checks must look at the raw line so original spacing
  # (including inside comments and strings) is visible.
  raw_lines = clean_lines.raw_lines
  line = raw_lines[linenum]
  if line.find('\t') != -1:
    error(filename, linenum, 'whitespace/tab', 1,
          'Tab found; better to use spaces')
  # One or three blank spaces at the beginning of the line is weird; it's
  # hard to reconcile that with 2-space indents.
  # NOTE: here are the conditions rob pike used for his tests. Mine aren't
  # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
  # if(RLENGTH > 20) complain = 0;
  # if(match($0, " +(error|private|public|protected):")) complain = 0;
  # if(match(prev, "&& *$")) complain = 0;
  # if(match(prev, "\\|\\| *$")) complain = 0;
  # if(match(prev, "[\",=><] *$")) complain = 0;
  # if(match($0, " <<")) complain = 0;
  # if(match(prev, " +for \\(")) complain = 0;
  # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
  initial_spaces = 0
  cleansed_line = clean_lines.elided[linenum]
  while initial_spaces < len(line) and line[initial_spaces] == ' ':
    initial_spaces += 1
  if line and line[-1].isspace():
    error(filename, linenum, 'whitespace/end_of_line', 4,
          'Line ends in whitespace. Consider deleting these extra spaces.')
  # There are certain situations we allow one space, notably for labels
  elif ((initial_spaces == 1 or initial_spaces == 3) and
        not Match(r'\s*\w+\s*:\s*$', cleansed_line)):
    error(filename, linenum, 'whitespace/indent', 3,
          'Weird number of spaces at line-start. '
          'Are you using a 2-space indent?')
  # Labels should always be indented at least one space.
  elif not initial_spaces and line[:2] != '//' and Search(r'[^:]:\s*$',
                                                          line):
    error(filename, linenum, 'whitespace/labels', 4,
          'Labels should always be indented at least one space. '
          'If this is a member-initializer list in a constructor or '
          'the base class list in a class definition, the colon should '
          'be on the following line.')
  # Check if the line is a header guard.
  is_header_guard = False
  if file_extension == 'h':
    cppvar = GetHeaderGuardCPPVariable(filename)
    if (line.startswith('#ifndef %s' % cppvar) or
        line.startswith('#define %s' % cppvar) or
        line.startswith('#endif // %s' % cppvar)):
      is_header_guard = True
  # #include lines and header guards can be long, since there's no clean way to
  # split them.
  #
  # URLs can be long too. It's possible to split these, but it makes them
  # harder to cut&paste.
  #
  # The "$Id:...$" comment may also get very long without it being the
  # developers fault.
  if (not line.startswith('#include') and not is_header_guard and
      not Match(r'^\s*//.*http(s?)://\S*$', line) and
      not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
    line_width = GetLineWidth(line)
    if line_width > 100:
      error(filename, linenum, 'whitespace/line_length', 4,
            'Lines should very rarely be longer than 100 characters')
    elif line_width > 80:
      error(filename, linenum, 'whitespace/line_length', 2,
            'Lines should be <= 80 characters long')
  # Multiple-statement detection counts ';' on the elided line, so
  # semicolons inside strings or comments are not miscounted.
  if (cleansed_line.count(';') > 1 and
      # for loops are allowed two ;'s (and may run over two lines).
      cleansed_line.find('for') == -1 and
      (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
       GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
      # It's ok to have many commands in a switch case that fits in 1 line
      not ((cleansed_line.find('case ') != -1 or
            cleansed_line.find('default:') != -1) and
           cleansed_line.find('break;') != -1)):
    error(filename, linenum, 'whitespace/newline', 4,
          'More than one command on the same line')
  # Some more style checks
  CheckBraces(filename, clean_lines, linenum, error)
  CheckSpacing(filename, clean_lines, linenum, error)
  CheckCheck(filename, clean_lines, linenum, error)
  if class_state and class_state.classinfo_stack:
    CheckSectionSpacing(filename, clean_lines,
                        class_state.classinfo_stack[-1], linenum, error)
# Matches an #include whose quoted path contains no directory component,
# e.g. '#include "bar.h"'; CheckIncludeLine flags these and asks for the
# directory to be named.
_RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"')
# Matches any #include line; group 1 captures the opening delimiter
# (< or ") and group 2 the header path.
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _IsTestFilename(filename):
"""Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise.
"""
if (filename.endswith('_test.cc') or
filename.endswith('_unittest.cc') or
filename.endswith('_regtest.cc')):
return True
else:
return False
def _ClassifyInclude(fileinfo, include, is_system):
  """Figures out what kind of header 'include' is.

  Args:
    fileinfo: The current file cpplint is running over. A FileInfo instance.
    include: The path to a #included file.
    is_system: True if the #include used <> rather than "".

  Returns:
    One of the _XXX_HEADER constants.

  For example:
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
    _C_SYS_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
    _CPP_SYS_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
    _LIKELY_MY_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
    ...                  'bar/foo_other_ext.h', False)
    _POSSIBLE_MY_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
    _OTHER_HEADER
  """
  # System headers are either C++ standard library (STL or otherwise) or
  # plain C system headers.
  if is_system:
    if include in _STL_HEADERS or include in _CPP_HEADERS:
      return _CPP_SYS_HEADER
    return _C_SYS_HEADER

  # A quoted include is likely owned by the target file when, after
  # dropping common suffixes, both share a basename and the header sits in
  # the target's directory or in the sibling '../public' directory.
  target_dir, target_base = os.path.split(
      _DropCommonSuffixes(fileinfo.RepositoryName()))
  include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
  if target_base == include_base:
    public_dir = os.path.normpath(target_dir + '/../public')
    if include_dir in (target_dir, public_dir):
      return _LIKELY_MY_HEADER

  # Sharing only the first dash/underscore-delimited component of the
  # basename is weaker evidence: the target may be implementing this
  # header, so it's allowed to come first, but its absence is never
  # complained about.
  target_first = _RE_FIRST_COMPONENT.match(target_base)
  include_first = _RE_FIRST_COMPONENT.match(include_base)
  if (target_first and include_first and
      target_first.group(0) == include_first.group(0)):
    return _POSSIBLE_MY_HEADER

  return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
  """Check rules that are applicable to #include lines.

  Strings on #include lines are NOT removed from elided line, to make
  certain tasks easier. However, to prevent false positives, checks
  applicable to #include lines in CheckLanguage must be put here.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    include_state: An _IncludeState instance in which the headers are inserted.
    error: The function to call with any errors found.
  """
  fileinfo = FileInfo(filename)
  line = clean_lines.lines[linenum]

  # "include" should use the new style "foo/bar.h" instead of just "bar.h"
  if _RE_PATTERN_INCLUDE_NEW_STYLE.search(line):
    error(filename, linenum, 'build/include', 4,
          'Include the directory when naming .h files')

  # A file should not be included more than once. There are a handful of
  # situations where doing so is okay, but in general it's not.
  include_match = _RE_PATTERN_INCLUDE.search(line)
  if include_match:
    header = include_match.group(2)
    used_angle_brackets = (include_match.group(1) == '<')
    if header in include_state:
      error(filename, linenum, 'build/include', 4,
            '"%s" already included at %s:%s' %
            (header, filename, include_state[header]))
    else:
      include_state[header] = linenum

      # We want to ensure that headers appear in the right order:
      # 1) for foo.cc, foo.h (preferred location)
      # 2) c system files
      # 3) cpp system files
      # 4) for foo.cc, foo.h (deprecated location)
      # 5) other google headers
      #
      # We classify each include statement as one of those 5 types
      # using a number of techniques. The include_state object keeps
      # track of the highest type seen, and complains if we see a
      # lower type after that.
      order_message = include_state.CheckNextIncludeOrder(
          _ClassifyInclude(fileinfo, header, used_angle_brackets))
      if order_message:
        error(filename, linenum, 'build/include_order', 4,
              '%s. Should be: %s.h, c system, c++ system, other.' %
              (order_message, fileinfo.BaseName()))
      if not include_state.IsInAlphabeticalOrder(header):
        error(filename, linenum, 'build/include_alpha', 4,
              'Include "%s" not in alphabetical order' % header)

  # Look for any of the stream classes that are part of standard C++.
  stream_match = _RE_PATTERN_INCLUDE.match(line)
  if stream_match:
    header = stream_match.group(2)
    if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', header):
      # Many unit tests use cout, so we exempt them.
      if not _IsTestFilename(filename):
        error(filename, linenum, 'readability/streams', 3,
              'Streams are highly discouraged.')
def _GetTextInside(text, start_pattern):
"""Retrieves all the text between matching open and close parentheses.
Given a string of lines and a regular expression string, retrieve all the text
following the expression and between opening punctuation symbols like
(, [, or {, and the matching close-punctuation symbol. This properly nested
occurrences of the punctuations, so for the text like
printf(a(), b(c()));
a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
start_pattern must match string having an open punctuation symbol at the end.
Args:
text: The lines to extract text. Its comments and strings must be elided.
It can be single line and can span multiple lines.
start_pattern: The regexp string indicating where to start extracting
the text.
Returns:
The extracted text.
None if either the opening string or ending punctuation could not be found.
"""
# TODO(sugawarayu): Audit cpplint.py to see what places could be profitably
# rewritten to use _GetTextInside (and use inferior regexp matching today).
# Give opening punctuations to get the matching close-punctuations.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
closing_punctuation = set(matching_punctuation.itervalues())
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
if not match: # start_pattern not found in text.
return None
start_position = match.end(0)
assert start_position > 0, (
'start_pattern must ends with an opening punctuation.')
assert text[start_position - 1] in matching_punctuation, (
'start_pattern must ends with an opening punctuation.')
# Stack of closing punctuations we expect to have in text after position.
punctuation_stack = [matching_punctuation[text[start_position - 1]]]
position = start_position
while punctuation_stack and position < len(text):
if text[position] == punctuation_stack[-1]:
punctuation_stack.pop()
elif text[position] in closing_punctuation:
# A closing punctuation without matching opening punctuations.
return None
elif text[position] in matching_punctuation:
punctuation_stack.append(matching_punctuation[text[position]])
position += 1
if punctuation_stack:
# Opening punctuations left without matching close-punctuations.
return None
# punctuations match.
return text[start_position:position - 1]
def CheckLanguage(filename, clean_lines, linenum, file_extension, include_state,
                  error):
  """Checks rules from the 'C++ language rules' section of cppguide.html.
  Some of these rules are hard to test (function overloading, using
  uint32 inappropriately), but we do the best we can.
  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    file_extension: The extension (without the dot) of the filename.
    include_state: An _IncludeState instance in which the headers are inserted.
    error: The function to call with any errors found.
  """
  # If the line is empty or consists of entirely a comment, no need to
  # check it.
  line = clean_lines.elided[linenum]
  if not line:
    return
  match = _RE_PATTERN_INCLUDE.search(line)
  if match:
    CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
    return
  # Create an extended_line, which is the concatenation of the current and
  # next lines, for more effective checking of code that may span more than one
  # line.
  if linenum + 1 < clean_lines.NumLines():
    extended_line = line + clean_lines.elided[linenum + 1]
  else:
    extended_line = line
  # Make Windows paths like Unix.
  fullname = os.path.abspath(filename).replace('\\', '/')
  # NOTE(review): extended_line and fullname are computed but never read in
  # the rest of this function as shown here — presumably leftovers from
  # checks that lived here previously; confirm before removing.
  # TODO(unknown): figure out if they're using default arguments in fn proto.
  # Check for non-const references in functions. This is tricky because &
  # is also used to take the address of something. We allow <> for templates,
  # (ignoring whatever is between the braces) and : for classes.
  # These are complicated re's. They try to capture the following:
  # paren (for fn-prototype start), typename, &, varname. For the const
  # version, we're willing for const to be before typename or after
  # Don't check the implementation on same line.
  fnline = line.split('{', 1)[0]
  if (len(re.findall(r'\([^()]*\b(?:[\w:]|<[^()]*>)+(\s?&|&\s?)\w+', fnline)) >
      len(re.findall(r'\([^()]*\bconst\s+(?:typename\s+)?(?:struct\s+)?'
                     r'(?:[\w:]|<[^()]*>)+(\s?&|&\s?)\w+', fnline)) +
      len(re.findall(r'\([^()]*\b(?:[\w:]|<[^()]*>)+\s+const(\s?&|&\s?)[\w]+',
                     fnline))):
    # We allow non-const references in a few standard places, like functions
    # called "swap()" or iostream operators like "<<" or ">>".
    if not Search(
        r'(swap|Swap|operator[<>][<>])\s*\(\s*(?:[\w:]|<.*>)+\s*&',
        fnline):
      error(filename, linenum, 'runtime/references', 2,
            'Is this a non-const reference? '
            'If so, make const or use a pointer.')
  # Check to see if they're using an conversion function cast.
  # I just try to capture the most common basic types, though there are more.
  # Parameterless conversion functions, such as bool(), are allowed as they are
  # probably a member operator declaration or default constructor.
  match = Search(
      r'(\bnew\s+)?\b'  # Grab 'new' operator, if it's there
      r'(int|float|double|bool|char|int32|uint32|int64|uint64)\([^)]', line)
  if match:
    # gMock methods are defined using some variant of MOCK_METHODx(name, type)
    # where type may be float(), int(string), etc. Without context they are
    # virtually indistinguishable from int(x) casts. Likewise, gMock's
    # MockCallback takes a template parameter of the form return_type(arg_type),
    # which looks much like the cast we're trying to detect.
    if (match.group(1) is None and  # If new operator, then this isn't a cast
        not (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
             Match(r'^\s*MockCallback<.*>', line))):
      error(filename, linenum, 'readability/casting', 4,
            'Using deprecated casting style. '
            'Use static_cast<%s>(...) instead' %
            match.group(2))
  CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
                  'static_cast',
                  r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
  # This doesn't catch all cases. Consider (const char * const)"hello".
  #
  # (char *) "foo" should always be a const_cast (reinterpret_cast won't
  # compile).
  if CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
                     'const_cast', r'\((char\s?\*+\s?)\)\s*"', error):
    pass
  else:
    # Check pointer casts for other than string constants
    CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
                    'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)
  # In addition, we look for people taking the address of a cast. This
  # is dangerous -- casts can assign to temporaries, so the pointer doesn't
  # point where you think.
  if Search(
      r'(&\([^)]+\)[\w(])|(&(static|dynamic|reinterpret)_cast\b)', line):
    error(filename, linenum, 'runtime/casting', 4,
          ('Are you taking an address of a cast? '
           'This is dangerous: could be a temp var. '
           'Take the address before doing the cast, rather than after'))
  # Check for people declaring static/global STL strings at the top level.
  # This is dangerous because the C++ language does not guarantee that
  # globals with constructors are initialized before the first access.
  match = Match(
      r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
      line)
  # Make sure it's not a function.
  # Function template specialization looks like: "string foo<Type>(...".
  # Class template definitions look like: "string Foo<Type>::Method(...".
  if match and not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)',
                         match.group(3)):
    error(filename, linenum, 'runtime/string', 4,
          'For a static/global string constant, use a C style string instead: '
          '"%schar %s[]".' %
          (match.group(1), match.group(2)))
  # Check that we're not using RTTI outside of testing code.
  if Search(r'\bdynamic_cast<', line) and not _IsTestFilename(filename):
    error(filename, linenum, 'runtime/rtti', 5,
          'Do not use dynamic_cast<>. If you need to cast within a class '
          "hierarchy, use static_cast<> to upcast. Google doesn't support "
          'RTTI.')
  # Member initialized with itself, e.g. 'member_(member_)'.
  if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
    error(filename, linenum, 'runtime/init', 4,
          'You seem to be initializing a member variable with itself.')
  if file_extension == 'h':
    # TODO(unknown): check that 1-arg constructors are explicit.
    # How to tell it's a constructor?
    # (handled in CheckForNonStandardConstructs for now)
    # TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS
    # (level 1 error)
    pass
  # Check if people are using the verboten C basic types. The only exception
  # we regularly allow is "unsigned short port" for port.
  if Search(r'\bshort port\b', line):
    if not Search(r'\bunsigned short port\b', line):
      error(filename, linenum, 'runtime/int', 4,
            'Use "unsigned short" for ports, not "short"')
  else:
    match = Search(r'\b(short|long(?! +double)|long long)\b', line)
    if match:
      error(filename, linenum, 'runtime/int', 4,
            'Use int16/int64/etc, rather than the C type %s' % match.group(1))
  # When snprintf is used, the second argument shouldn't be a literal.
  match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
  if match and match.group(2) != '0':
    # If 2nd arg is zero, snprintf is used to calculate size.
    error(filename, linenum, 'runtime/printf', 3,
          'If you can, use sizeof(%s) instead of %s as the 2nd arg '
          'to snprintf.' % (match.group(1), match.group(2)))
  # Check if some verboten C functions are being used.
  if Search(r'\bsprintf\b', line):
    error(filename, linenum, 'runtime/printf', 5,
          'Never use sprintf. Use snprintf instead.')
  match = Search(r'\b(strcpy|strcat)\b', line)
  if match:
    error(filename, linenum, 'runtime/printf', 4,
          'Almost always, snprintf is better than %s' % match.group(1))
  if Search(r'\bsscanf\b', line):
    error(filename, linenum, 'runtime/printf', 1,
          'sscanf can be ok, but is slow and can overflow buffers.')
  # Check if some verboten operator overloading is going on
  # TODO(unknown): catch out-of-line unary operator&:
  #   class X {};
  #   int operator&(const X& x) { return 42; }  // unary operator&
  # The trick is it's hard to tell apart from binary operator&:
  #   class Y { int operator&(const Y& x) { return 23; } };  // binary operator&
  if Search(r'\boperator\s*&\s*\(\s*\)', line):
    error(filename, linenum, 'runtime/operator', 4,
          'Unary operator& is dangerous. Do not use it.')
  # Check for suspicious usage of "if" like
  # } if (a == b) {
  if Search(r'\}\s*if\s*\(', line):
    error(filename, linenum, 'readability/braces', 4,
          'Did you mean "else if"? If not, start a new line for "if".')
  # Check for potential format string bugs like printf(foo).
  # We constrain the pattern not to pick things like DocidForPrintf(foo).
  # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
  # TODO(sugawarayu): Catch the following case. Need to change the calling
  # convention of the whole function to process multiple line to handle it.
  #   printf(
  #       boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
  printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
  if printf_args:
    match = Match(r'([\w.\->()]+)$', printf_args)
    if match:
      function_name = re.search(r'\b((?:string)?printf)\s*\(',
                                line, re.I).group(1)
      error(filename, linenum, 'runtime/printf', 4,
            'Potential format string bug. Do %s("%%s", %s) instead.'
            % (function_name, match.group(1)))
  # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
  match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
  if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
    error(filename, linenum, 'runtime/memset', 4,
          'Did you mean "memset(%s, 0, %s)"?'
          % (match.group(1), match.group(2)))
  if Search(r'\busing namespace\b', line):
    error(filename, linenum, 'build/namespaces', 5,
          'Do not use namespace using-directives. '
          'Use using-declarations instead.')
  # Detect variable-length arrays.
  match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
  if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
      match.group(3).find(']') == -1):
    # Split the size using space and arithmetic operators as delimiters.
    # If any of the resulting tokens are not compile time constants then
    # report the error.
    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
    is_const = True
    skip_next = False
    for tok in tokens:
      if skip_next:
        skip_next = False
        continue
      if Search(r'sizeof\(.+\)', tok): continue
      if Search(r'arraysize\(\w+\)', tok): continue
      tok = tok.lstrip('(')
      tok = tok.rstrip(')')
      if not tok: continue
      if Match(r'\d+', tok): continue
      if Match(r'0[xX][0-9a-fA-F]+', tok): continue
      if Match(r'k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
      # A catch all for tricky sizeof cases, including 'sizeof expression',
      # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
      # requires skipping the next token because we split on ' ' and '*'.
      if tok.startswith('sizeof'):
        skip_next = True
        continue
      is_const = False
      break
    if not is_const:
      error(filename, linenum, 'runtime/arrays', 1,
            'Do not use variable-length arrays. Use an appropriately named '
            "('k' followed by CamelCase) compile-time constant for the size.")
  # If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or
  # DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing
  # in the class declaration.
  match = Match(
      (r'\s*'
       r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
       r'\(.*\);$'),
      line)
  if match and linenum + 1 < clean_lines.NumLines():
    next_line = clean_lines.elided[linenum + 1]
    # We allow some, but not all, declarations of variables to be present
    # in the statement that defines the class. The [\w\*,\s]* fragment of
    # the regular expression below allows users to declare instances of
    # the class or pointers to instances, but not less common types such
    # as function pointers or arrays. It's a tradeoff between allowing
    # reasonable code and avoiding trying to parse more C++ using regexps.
    if not Search(r'^\s*}[\w\*,\s]*;', next_line):
      error(filename, linenum, 'readability/constructors', 3,
            match.group(1) + ' should be the last thing in the class')
  # Check for use of unnamed namespaces in header files. Registration
  # macros are typically OK, so we allow use of "namespace {" on lines
  # that end with backslashes.
  if (file_extension == 'h'
      and Search(r'\bnamespace\s*{', line)
      and line[-1] != '\\'):
    error(filename, linenum, 'build/namespaces', 4,
          'Do not use unnamed namespaces in header files. See '
          'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
          ' for more information.')
def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern,
                    error):
  """Checks for a C-style cast by looking for the pattern.

  This also handles sizeof(type) warnings, due to similarity of content.

  Args:
    filename: The name of the current file.
    linenum: The number of the line to check.
    line: The line of code to check.
    raw_line: The raw line of code to check, with comments.
    cast_type: The string for the C++ cast to recommend. This is either
      reinterpret_cast, static_cast, or const_cast, depending.
    pattern: The regular expression used to find C-style casts.
    error: The function to call with any errors found.

  Returns:
    True if an error was emitted.
    False otherwise.
  """
  cast_match = Search(pattern, line)
  if not cast_match:
    return False

  # e.g., sizeof(int) — report separately from cast style.
  if Match(r'.*sizeof\s*$', line[0:cast_match.start(1) - 1]):
    error(filename, linenum, 'runtime/sizeof', 1,
          'Using sizeof(type). Use sizeof(varname) instead if possible')
    return True

  remainder = line[cast_match.end(0):]

  # The close paren is for function pointers as arguments to a function.
  # eg, void foo(void (*bar)(int));
  # The semicolon check is a more basic function check; also possibly a
  # function pointer typedef.
  # eg, void foo(int); or void foo(int) const;
  # The equals check is for function pointer assignment.
  # eg, void *(*foo)(int) = ...
  # The > is for MockCallback<...> ...
  #
  # Right now, this will only catch cases where there's a single argument, and
  # it's unnamed. It should probably be expanded to check for multiple
  # arguments with some unnamed.
  function_match = Match(r'\s*(\)|=|(const)?\s*(;|\{|throw\(\)|>))', remainder)
  if function_match:
    tail = function_match.group(3)
    if (not tail or tail == ';' or
        ('MockCallback<' not in raw_line and '/*' not in raw_line)):
      error(filename, linenum, 'readability/function', 3,
            'All parameters should be named in a function')
    return True

  # At this point, all that should be left is actual casts.
  error(filename, linenum, 'readability/casting', 4,
        'Using C-style cast. Use %s<%s>(...) instead' %
        (cast_type, cast_match.group(1)))
  return True
# Maps a standard C++ header to the template names it declares; consumed
# elsewhere in this file to build regexes that suggest a missing #include
# when one of these templates appears in code.
_HEADERS_CONTAINING_TEMPLATES = (
    ('<deque>', ('deque',)),
    ('<functional>', ('unary_function', 'binary_function',
                      'plus', 'minus', 'multiplies', 'divides', 'modulus',
                      'negate',
                      'equal_to', 'not_equal_to', 'greater', 'less',
                      'greater_equal', 'less_equal',
                      'logical_and', 'logical_or', 'logical_not',
                      'unary_negate', 'not1', 'binary_negate', 'not2',
                      'bind1st', 'bind2nd',
                      'pointer_to_unary_function',
                      'pointer_to_binary_function',
                      'ptr_fun',
                      'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
                      'mem_fun_ref_t',
                      'const_mem_fun_t', 'const_mem_fun1_t',
                      'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
                      'mem_fun_ref',
                     )),
    ('<limits>', ('numeric_limits',)),
    ('<list>', ('list',)),
    ('<map>', ('map', 'multimap',)),
    ('<memory>', ('allocator',)),
    ('<queue>', ('queue', 'priority_queue',)),
    ('<set>', ('set', 'multiset',)),
    ('<stack>', ('stack',)),
    ('<string>', ('char_traits', 'basic_string',)),
    ('<utility>', ('pair',)),
    ('<vector>', ('vector',)),
    # gcc extensions.
    # Note: std::hash is their hash, ::hash is our hash
    ('<hash_map>', ('hash_map', 'hash_multimap',)),
    ('<hash_set>', ('hash_set', 'hash_multiset',)),
    ('<slist>', ('slist',)),
    )
# Matches uses of the non-templatized STL 'string' type.
_RE_PATTERN_STRING = re.compile(r'\bstring\b')
# (compiled regexp, template name, required header) triples for the
# <algorithm> entities checked below.
_re_pattern_algorithm_header = []
for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
                  'transform'):
  # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
  # type::max().
  _re_pattern_algorithm_header.append(
      (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
       _template,
       '<algorithm>'))
# Same (regexp, 'name<>', header) triples for every template listed in
# _HEADERS_CONTAINING_TEMPLATES.
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
  for _template in _templates:
    _re_pattern_templates.append(
        (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
         _template + '<>',
         _header))
def FilesBelongToSameModule(filename_cc, filename_h):
  """Check if these two filenames belong to the same module.

  The concept of a 'module' here is a as follows:
  foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
  same 'module' if they are in the same directory.
  some/path/public/xyzzy and some/path/internal/xyzzy are also considered
  to belong to the same module here.

  If the filename_cc contains a longer path than the filename_h, for example,
  '/absolute/path/to/base/sysinfo.cc', and this file would include
  'base/sysinfo.h', this function also produces the prefix needed to open the
  header. This is used by the caller of this function to more robustly open the
  header file. We don't have access to the real include paths in this context,
  so we need this guesswork here.

  Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
  according to this implementation. Because of this, this function gives
  some false positives. This should be sufficiently rare in practice.

  Args:
    filename_cc: is the path for the .cc file
    filename_h: is the path for the header path

  Returns:
    Tuple with a bool and a string:
    bool: True if filename_cc and filename_h belong to the same module.
    string: the additional prefix needed to open the header file.
  """
  if not filename_cc.endswith('.cc') or not filename_h.endswith('.h'):
    return (False, '')

  # Reduce the .cc path to a module stem: drop the extension, any test
  # suffix, and the /public/ or /internal/ path components.
  stem_cc = filename_cc[:-len('.cc')]
  for test_suffix in ('_unittest', '_test'):
    if stem_cc.endswith(test_suffix):
      stem_cc = stem_cc[:-len(test_suffix)]
      break
  stem_cc = stem_cc.replace('/public/', '/').replace('/internal/', '/')

  # Reduce the header path the same way, also dropping an '-inl' suffix.
  stem_h = filename_h[:-len('.h')]
  if stem_h.endswith('-inl'):
    stem_h = stem_h[:-len('-inl')]
  stem_h = stem_h.replace('/public/', '/').replace('/internal/', '/')

  if not stem_cc.endswith(stem_h):
    return (False, '')
  # Whatever precedes the shared stem in the .cc path is the prefix the
  # caller must prepend to locate the header on disk.
  return (True, stem_cc[:-len(stem_h)])
def UpdateIncludeState(filename, include_state, io=codecs):
  """Fill up the include_state with new includes found from the file.

  Args:
    filename: the name of the header to read.
    include_state: an _IncludeState instance in which the headers are inserted.
    io: The io factory to use to read the file. Provided for testability.

  Returns:
    True if a header was successfully added. False otherwise.
  """
  try:
    headerfile = io.open(filename, 'r', 'utf8', 'replace')
  except IOError:
    return False
  linenum = 0
  for raw_line in headerfile:
    linenum += 1
    include_match = _RE_PATTERN_INCLUDE.search(CleanseComments(raw_line))
    if include_match:
      # The value formatting is cute, but not really used right now.
      # What matters here is that the key is in include_state.
      include_state.setdefault(include_match.group(2),
                               '%s:%d' % (filename, linenum))
  return True
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
                              io=codecs):
  """Reports for missing stl includes.

  This function will output warnings to make sure you are including the headers
  necessary for the stl containers and functions that you use. We only give one
  reason to include a header. For example, if you use both equal_to<> and
  less<> in a .h file, only one (the latter in the file) of these will be
  reported as a reason to include the <functional>.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    include_state: An _IncludeState instance.
    error: The function to call with any errors found.
    io: The IO factory to use to read the header file. Provided for unittest
        injection.
  """
  required = {}  # A map of header name to linenumber and the template entity.
                 # Example of required: { '<functional>': (1219, 'less<>') }
  # First pass: scan every elided line for uses of STL entities and record
  # which header each one requires.  (xrange: this file is Python 2.)
  for linenum in xrange(clean_lines.NumLines()):
    line = clean_lines.elided[linenum]
    if not line or line[0] == '#':
      continue
    # String is special -- it is a non-templatized type in STL.
    matched = _RE_PATTERN_STRING.search(line)
    if matched:
      # Don't warn about strings in non-STL namespaces:
      # (We check only the first match per line; good enough.)
      prefix = line[:matched.start()]
      if prefix.endswith('std::') or not prefix.endswith('::'):
        required['<string>'] = (linenum, 'string')
    for pattern, template, header in _re_pattern_algorithm_header:
      if pattern.search(line):
        required[header] = (linenum, template)
    # The following function is just a speed up, no semantics are changed.
    if not '<' in line:  # Reduces the cpu time usage by skipping lines.
      continue
    for pattern, template, header in _re_pattern_templates:
      if pattern.search(line):
        required[header] = (linenum, template)
  # The policy is that if you #include something in foo.h you don't need to
  # include it again in foo.cc. Here, we will look at possible includes.
  # Let's copy the include_state so it is only messed up within this function.
  include_state = include_state.copy()
  # Did we find the header for this file (if any) and successfully load it?
  header_found = False
  # Use the absolute path so that matching works properly.
  abs_filename = FileInfo(filename).FullName()
  # For Emacs's flymake.
  # If cpplint is invoked from Emacs's flymake, a temporary file is generated
  # by flymake and that file name might end with '_flymake.cc'. In that case,
  # restore original file name here so that the corresponding header file can be
  # found.
  # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
  # instead of 'foo_flymake.h'
  abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
  # include_state is modified during iteration, so we iterate over a copy of
  # the keys.
  header_keys = include_state.keys()
  for header in header_keys:
    (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
    fullpath = common_path + header
    if same_module and UpdateIncludeState(fullpath, include_state, io):
      header_found = True
  # If we can't find the header file for a .cc, assume it's because we don't
  # know where to look. In that case we'll give up as we're not sure they
  # didn't include it in the .h file.
  # TODO(unknown): Do a better job of finding .h files so we are confident that
  # not having the .h file means there isn't one.
  if filename.endswith('.cc') and not header_found:
    return
  # All the lines have been processed, report the errors found.
  for required_header_unstripped in required:
    template = required[required_header_unstripped][1]
    if required_header_unstripped.strip('<>"') not in include_state:
      error(filename, required[required_header_unstripped][0],
            'build/include_what_you_use', 4,
            'Add #include ' + required_header_unstripped + ' for ' + template)
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
  """Check that make_pair's template arguments are deduced.

  G++ 4.6 in C++0x mode fails badly if make_pair's template arguments are
  specified explicitly, and such use isn't intended in any case.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Inspect the raw (not elided) line so string contents are seen too.
  current_line = clean_lines.raw_lines[linenum]
  if not _RE_PATTERN_EXPLICIT_MAKEPAIR.search(current_line):
    return
  error(filename, linenum, 'build/explicit_make_pair',
        4,  # 4 = high confidence
        'Omit template arguments from make_pair OR use pair directly OR'
        ' if appropriate, construct a pair directly')
def ProcessLine(filename, file_extension,
                clean_lines, line, include_state, function_state,
                class_state, error, extra_check_functions=[]):
  """Processes a single line in the file.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    clean_lines: An array of strings, each representing a line of the file,
                 with comments stripped.
    line: Number of line being processed.
    include_state: An _IncludeState instance in which the headers are inserted.
    function_state: A _FunctionState instance which counts function lines, etc.
    class_state: A _ClassState instance which maintains information about
                 the current stack of nested class declarations being parsed.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error
  """
  raw_lines = clean_lines.raw_lines
  # NOLINT suppressions must be parsed before any check runs on this line so
  # that the checks below honor them.
  ParseNolintSuppressions(filename, raw_lines[line], line, error)
  CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
  CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
  CheckStyle(filename, clean_lines, line, file_extension, class_state, error)
  CheckLanguage(filename, clean_lines, line, file_extension, include_state,
                error)
  CheckForNonStandardConstructs(filename, clean_lines, line,
                                class_state, error)
  CheckPosixThreading(filename, clean_lines, line, error)
  CheckInvalidIncrement(filename, clean_lines, line, error)
  CheckMakePairUsesDeduction(filename, clean_lines, line, error)
  # Caller-supplied checks run last.  (The [] default is never mutated here.)
  for check_fn in extra_check_functions:
    check_fn(filename, clean_lines, line, error)
def ProcessFileData(filename, file_extension, lines, error,
                    extra_check_functions=[]):
  """Performs lint checks and reports any errors to the given error function.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    lines: An array of strings, each representing a line of the file, with the
           last element being empty if the file is terminated with a newline.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error
  """
  # Pad the file with sentinel lines so reported line numbers are 1-based and
  # checks never run off either end of the array.
  lines = (['// marker so line numbers and indices both start at 1'] + lines +
           ['// marker so line numbers end in a known way'])
  include_state = _IncludeState()
  function_state = _FunctionState()
  class_state = _ClassState()
  ResetNolintSuppressions()
  CheckForCopyright(filename, lines, error)
  if file_extension == 'h':
    CheckForHeaderGuard(filename, lines, error)
  RemoveMultiLineComments(filename, lines, error)
  clean_lines = CleansedLines(lines)
  for line in xrange(clean_lines.NumLines()):
    ProcessLine(filename, file_extension, clean_lines, line,
                include_state, function_state, class_state, error,
                extra_check_functions)
  class_state.CheckFinished(filename, error)
  CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
  # We check here rather than inside ProcessLine so that we see raw
  # lines rather than "cleaned" lines.
  CheckForUnicodeReplacementCharacters(filename, lines, error)
  CheckForNewlineAtEOF(filename, lines, error)
def ProcessFile(filename, vlevel, extra_check_functions=[]):
  """Does google-lint on a single file.

  Args:
    filename: The name of the file to parse.
    vlevel: The level of errors to report.  Every error of confidence
            >= verbose_level will be reported.  0 is a good default.
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error
  """
  _SetVerboseLevel(vlevel)
  try:
    # Support the UNIX convention of using "-" for stdin.  Note that
    # we are not opening the file with universal newline support
    # (which codecs doesn't support anyway), so the resulting lines do
    # contain trailing '\r' characters if we are reading a file that
    # has CRLF endings.
    # If after the split a trailing '\r' is present, it is removed
    # below. If it is not expected to be present (i.e. os.linesep !=
    # '\r\n' as in Windows), a warning is issued below if this file
    # is processed.
    if filename == '-':
      lines = codecs.StreamReaderWriter(sys.stdin,
                                        codecs.getreader('utf8'),
                                        codecs.getwriter('utf8'),
                                        'replace').read().split('\n')
    else:
      # Fix: close the handle explicitly instead of leaking it until GC.
      target_file = codecs.open(filename, 'r', 'utf8', 'replace')
      try:
        lines = target_file.read().split('\n')
      finally:
        target_file.close()
    carriage_return_found = False
    # Remove trailing '\r'.
    for linenum in range(len(lines)):
      if lines[linenum].endswith('\r'):
        lines[linenum] = lines[linenum].rstrip('\r')
        carriage_return_found = True
  except IOError:
    sys.stderr.write(
        "Skipping input '%s': Can't open for reading\n" % filename)
    return
  # Note, if no dot is found, this will give the entire filename as the ext.
  file_extension = filename[filename.rfind('.') + 1:]
  # When reading from stdin, the extension is unknown, so no cpplint tests
  # should rely on the extension.
  if (filename != '-' and file_extension != 'cc' and file_extension != 'h'
      and file_extension != 'cpp'):
    # NOTE(review): '.cpp' is accepted above but this message only mentions
    # .cc/.h; kept as-is since tools may match on the exact wording.
    sys.stderr.write('Ignoring %s; not a .cc or .h file\n' % filename)
  else:
    ProcessFileData(filename, file_extension, lines, Error,
                    extra_check_functions)
    if carriage_return_found and os.linesep != '\r\n':
      # Use 0 for linenum since outputting only one error for potentially
      # several lines.
      Error(filename, 0, 'whitespace/newline', 1,
            'One or more unexpected \\r (^M) found; '
            'better to use only a \\n')
  sys.stderr.write('Done processing %s\n' % filename)
def PrintUsage(message):
  """Write the usage text to stderr and terminate the process.

  Args:
    message: The optional error message.  When given, exit via a fatal-error
        status line; otherwise exit with status 1.
  """
  sys.stderr.write(_USAGE)
  if not message:
    sys.exit(1)
  sys.exit('\nFATAL ERROR: ' + message)
def PrintCategories():
  """Write every error category usable with --filter to stderr, then exit.

  These are the categories used to filter messages via --filter.
  """
  for category in _ERROR_CATEGORIES:
    sys.stderr.write('  %s\n' % category)
  sys.exit(0)
def ParseArguments(args):
  """Parses the command line arguments.

  This may set the output format and verbosity level as side-effects.

  Args:
    args: The command line arguments (excluding the program name).

  Returns:
    The list of filenames to lint.
  """
  try:
    (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
                                                 'counting=',
                                                 'filter='])
  except getopt.GetoptError:
    # PrintUsage never returns (it calls sys.exit), so 'opts' and
    # 'filenames' are always bound below this point.
    PrintUsage('Invalid arguments.')
  verbosity = _VerboseLevel()
  output_format = _OutputFormat()
  filters = ''
  counting_style = ''
  for (opt, val) in opts:
    if opt == '--help':
      PrintUsage(None)
    elif opt == '--output':
      # Idiom fix: 'val not in' instead of 'not val in' (matches --counting).
      if val not in ('emacs', 'vs7'):
        PrintUsage('The only allowed output formats are emacs and vs7.')
      output_format = val
    elif opt == '--verbose':
      verbosity = int(val)
    elif opt == '--filter':
      filters = val
      # An explicitly empty --filter means "list the categories and exit".
      if not filters:
        PrintCategories()
    elif opt == '--counting':
      if val not in ('total', 'toplevel', 'detailed'):
        PrintUsage('Valid counting options are total, toplevel, and detailed')
      counting_style = val
  if not filenames:
    PrintUsage('No files were specified.')
  _SetOutputFormat(output_format)
  _SetVerboseLevel(verbosity)
  _SetFilters(filters)
  _SetCountingStyle(counting_style)
  return filenames
def main():
  """Entry point: lint every file named on the command line.

  Exits with status 1 iff any lint error was reported, 0 otherwise.
  """
  filenames = ParseArguments(sys.argv[1:])
  # Change stderr to write with replacement characters so we don't die
  # if we try to print something containing non-ASCII characters.
  sys.stderr = codecs.StreamReaderWriter(sys.stderr,
                                         codecs.getreader('utf8'),
                                         codecs.getwriter('utf8'),
                                         'replace')
  _cpplint_state.ResetErrorCounts()
  for filename in filenames:
    ProcessFile(filename, _cpplint_state.verbose_level)
  _cpplint_state.PrintErrorCounts()
  # bool exit status: True -> 1 when any error was emitted.
  sys.exit(_cpplint_state.error_count > 0)
if __name__ == '__main__':
  main()
|
tinacg/tinacg.github.io | refs/heads/master | taggednotes/process.py | 1 | import sys
# Converts a plain-text notes file (<language>.txt) into a JavaScript data
# file (<language>.js) defining a 'notes' object.  Each note in the input is:
#   title line, comma-separated tags line, reference line, body lines, '--'.
if len(sys.argv) != 2:
    print("Usage: python process.py languagenotes")
else:
    language = sys.argv[1].replace(".", "")
    with open(language + ".js", 'w') as output:
        print("var notes = {", file=output)
        # 'infile'/'note_id' instead of the original 'input'/'id', which
        # shadowed builtins.
        with open(language + ".txt") as infile:
            note_id = 0
            while True:
                title = infile.readline()
                if title == "":
                    break  # end of input file
                title = title.strip()
                title = title.replace("\"", "\\\"")
                # new note: tags are emitted as a JS array literal with HTML
                # entities for quotes so they are safe inside the page.
                # (The entity escapes below were corrupted to no-op
                # self-replacements by an HTML-decoding pass; restored here.)
                tags_line = infile.readline().strip()
                tags = str(list(map(lambda s: s.strip().replace('"', '&quot;').replace("'", '&#39;'), tags_line.split(","))))
                reference = infile.readline().strip()
                print('note' + str(note_id) + ': { ', file=output)
                print('id: "note' + str(note_id) + '", ', file=output)
                note_id += 1
                print('title: "' + title + '",', file=output)
                print('tags: ' + tags + ',', file=output)
                print('reference: "' + reference + '",', file=output)
                print('body: "', file=output, end="")
                while True:
                    body_line = infile.readline()
                    if body_line == "" or body_line.strip() == "--":
                        break  # '--' terminates the body of one note
                    body_line = body_line.rstrip()
                    # Escape for embedding in a JS string / HTML body.
                    body_line = body_line.replace("\\", "\\\\")
                    body_line = body_line.replace("<", "&lt;")
                    body_line = body_line.replace(">", "&gt;")
                    print(body_line.replace('"', '&quot;'), file=output, end="<br>")
                print('" },\n', file=output)
            print("numNotes: " + str(note_id), file=output)
            print("};", file=output)
|
youssef-poisson/angular | refs/heads/master | node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/simple_copy.py | 1869 | # Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A clone of the default copy.deepcopy that doesn't handle cyclic
structures or complex types except for dicts and lists. This is
because gyp copies so large structure that small copy overhead ends up
taking seconds in a project the size of Chromium."""
class Error(Exception):
  """Raised by deepcopy() when it meets a type it does not support."""
  pass
# Public API of this module.
__all__ = ["Error", "deepcopy"]
def deepcopy(x):
  """Deep copy operation on gyp objects such as strings, ints, dicts
  and lists. More than twice as fast as copy.deepcopy but much less
  generic.

  Raises:
    Error: if type(x) has no registered copier.
  """
  try:
    return _deepcopy_dispatch[type(x)](x)
  except KeyError:
    # Bug fix: the original joined the two literals with '+', so '%' bound
    # only to the second (format-specifier-free) string and raised TypeError
    # instead of this Error.  Implicit concatenation formats the whole
    # message.
    raise Error('Unsupported type %s for deepcopy. Use copy.deepcopy '
                'or expand simple_copy support.' % type(x))
# Dispatch table mapping a concrete type to its copier.  'd' is a short
# alias used only while the table is built; it is deleted afterwards.
_deepcopy_dispatch = d = {}
def _deepcopy_atomic(x):
  # Immutable scalars need no copying; return the object itself.
  return x
# NOTE: 'long' and 'unicode' make this module Python 2 only.
for x in (type(None), int, long, float,
          bool, str, unicode, type):
  d[x] = _deepcopy_atomic
def _deepcopy_list(x):
  return [deepcopy(a) for a in x]
d[list] = _deepcopy_list
def _deepcopy_dict(x):
  y = {}
  for key, value in x.iteritems():  # Python 2 dict iteration.
    y[deepcopy(key)] = deepcopy(value)
  return y
d[dict] = _deepcopy_dict
del d
|
aYukiSekiguchi/ACCESS-Chromium | refs/heads/master | third_party/mesa/MesaLib/src/mapi/glapi/gen/license.py | 58 | # (C) Copyright IBM Corporation 2004
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Ian Romanick <idr@us.ibm.com>
# Template for an MIT/X11-style license header.  The first '%s' receives the
# copyright line(s); the second '%s' receives the holder name(s) used in the
# liability disclaimer.
bsd_license_template = """%s
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sub license,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice (including the next
paragraph) shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
%s,
AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
|
utecuy/edx-platform | refs/heads/master | common/lib/xmodule/xmodule/annotatable_module.py | 107 | import logging
from lxml import etree
from pkg_resources import resource_string
from xmodule.x_module import XModule
from xmodule.raw_module import RawDescriptor
from xblock.fields import Scope, String
import textwrap
log = logging.getLogger(__name__)
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
class AnnotatableFields(object):
    """XBlock field definitions shared by the annotatable module/descriptor."""
    # Raw annotatable XML; the default is a self-documenting example.
    data = String(
        help=_("XML data for the annotation"),
        scope=Scope.content,
        default=textwrap.dedent("""
        <annotatable>
            <instructions>
                <p>Enter your (optional) instructions for the exercise in HTML format.</p>
                <p>Annotations are specified by an <code>&lt;annotation&gt;</code> tag which may may have the following attributes:</p>
                <ul class="instructions-template">
                    <li><code>title</code> (optional). Title of the annotation. Defaults to <i>Commentary</i> if omitted.</li>
                    <li><code>body</code> (<b>required</b>). Text of the annotation.</li>
                    <li><code>problem</code> (optional). Numeric index of the problem associated with this annotation. This is a zero-based index, so the first problem on the page would have <code>problem="0"</code>.</li>
                    <li><code>highlight</code> (optional). Possible values: yellow, red, orange, green, blue, or purple. Defaults to yellow if this attribute is omitted.</li>
                </ul>
            </instructions>
            <p>Add your HTML with annotation spans here.</p>
            <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. <annotation title="My title" body="My comment" highlight="yellow" problem="0">Ut sodales laoreet est, egestas gravida felis egestas nec.</annotation> Aenean at volutpat erat. Cras commodo viverra nibh in aliquam.</p>
            <p>Nulla facilisi. <annotation body="Basic annotation example." problem="1">Pellentesque id vestibulum libero.</annotation> Suspendisse potenti. Morbi scelerisque nisi vitae felis dictum mattis. Nam sit amet magna elit. Nullam volutpat cursus est, sit amet sagittis odio vulputate et. Curabitur euismod, orci in vulputate imperdiet, augue lorem tempor purus, id aliquet augue turpis a est. Aenean a sagittis libero. Praesent fringilla pretium magna, non condimentum risus elementum nec. Pellentesque faucibus elementum pharetra. Pellentesque vitae metus eros.</p>
        </annotatable>
        """)
    )
    # Human-readable name shown in Studio / the courseware.
    display_name = String(
        display_name=_("Display Name"),
        help=_("Display name for this module"),
        scope=Scope.settings,
        default=_('Annotation'),
    )
class AnnotatableModule(AnnotatableFields, XModule):
    """Runtime XModule that renders annotatable XML as highlighted HTML spans."""
    # Client-side assets bundled with this module.
    js = {
        'coffee': [
            resource_string(__name__, 'js/src/javascript_loader.coffee'),
            resource_string(__name__, 'js/src/html/display.coffee'),
            resource_string(__name__, 'js/src/annotatable/display.coffee'),
        ],
        'js': [
            resource_string(__name__, 'js/src/collapsible.js'),
        ]
    }
    js_module_name = "Annotatable"
    css = {'scss': [resource_string(__name__, 'css/annotatable/display.scss')]}
    icon_class = 'annotatable'
    def __init__(self, *args, **kwargs):
        """Parse self.data once and cache the pieces needed for rendering."""
        super(AnnotatableModule, self).__init__(*args, **kwargs)
        xmltree = etree.fromstring(self.data)
        # <instructions> is stripped out of the tree by _extract_instructions.
        self.instructions = self._extract_instructions(xmltree)
        self.content = etree.tostring(xmltree, encoding='unicode')
        self.element_id = self.location.html_id()
        self.highlight_colors = ['yellow', 'orange', 'purple', 'blue', 'green']
    def _get_annotation_class_attr(self, index, el):
        """ Returns a dict with the CSS class attribute to set on the annotation
        and an XML key to delete from the element.
        """
        attr = {}
        cls = ['annotatable-span', 'highlight']
        highlight_key = 'highlight'
        color = el.get(highlight_key)
        if color is not None:
            # Unknown colors are dropped silently; only the attribute is removed.
            if color in self.highlight_colors:
                cls.append('highlight-' + color)
            attr['_delete'] = highlight_key
        attr['value'] = ' '.join(cls)
        return {'class': attr}
    def _get_annotation_data_attr(self, index, el):
        """ Returns a dict in which the keys are the HTML data attributes
        to set on the annotation element. Each data attribute has a
        corresponding 'value' and (optional) '_delete' key to specify
        an XML attribute to delete.
        """
        data_attrs = {}
        attrs_map = {
            'body': 'data-comment-body',
            'title': 'data-comment-title',
            'problem': 'data-problem-id'
        }
        for xml_key in attrs_map.keys():
            if xml_key in el.attrib:
                value = el.get(xml_key, '')
                html_key = attrs_map[xml_key]
                data_attrs[html_key] = {'value': value, '_delete': xml_key}
        return data_attrs
    def _render_annotation(self, index, el):
        """ Renders an annotation element for HTML output. """
        attr = {}
        attr.update(self._get_annotation_class_attr(index, el))
        attr.update(self._get_annotation_data_attr(index, el))
        # Rewrite <annotation> into a <span> carrying class/data attributes.
        el.tag = 'span'
        for key in attr.keys():
            el.set(key, attr[key]['value'])
            if '_delete' in attr[key] and attr[key]['_delete'] is not None:
                delete_key = attr[key]['_delete']
                del el.attrib[delete_key]
    def _render_content(self):
        """ Renders annotatable content with annotation spans and returns HTML. """
        xmltree = etree.fromstring(self.content)
        xmltree.tag = 'div'
        if 'display_name' in xmltree.attrib:
            del xmltree.attrib['display_name']
        index = 0
        for el in xmltree.findall('.//annotation'):
            self._render_annotation(index, el)
            index += 1
        return etree.tostring(xmltree, encoding='unicode')
    def _extract_instructions(self, xmltree):
        """ Removes <instructions> from the xmltree and returns them as a string, otherwise None. """
        instructions = xmltree.find('instructions')
        if instructions is not None:
            instructions.tag = 'div'
            xmltree.remove(instructions)
            return etree.tostring(instructions, encoding='unicode')
        return None
    def get_html(self):
        """ Renders parameters to template. """
        context = {
            'display_name': self.display_name_with_default,
            'element_id': self.element_id,
            'instructions_html': self.instructions,
            'content_html': self._render_content()
        }
        return self.system.render_template('annotatable.html', context)
class AnnotatableDescriptor(AnnotatableFields, RawDescriptor):
    """Studio-side descriptor: edits the raw XML via the generic raw editor."""
    module_class = AnnotatableModule
    mako_template = "widgets/raw-edit.html"
|
hurricup/intellij-community | refs/heads/master | python/testData/optimizeImports/alphabeticalOrder.py | 92 | from __future__ import unicode_literals
from __future__ import absolute_import
import z
import b
import a
from a import C1
from alphabet import D
from alphabet import A
from b import func
from import foo # broken
import sys
from . import m1
from datetime import timedelta
import # broken
from alphabet import *
from .. import m2
from alphabet import C
from alphabet import B, A
from .pkg import m3
from . import m4, m5
print(z, b, a, C1, func, sys, abc, foo, timedelta, A, B, C, D, m1, m2, m3, m4, m5) |
SlimRemix/android_external_chromium_org | refs/heads/lp5.1 | chrome/common/extensions/docs/server2/compiled_file_system_test.py | 79 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import functools
import os
from compiled_file_system import Cache, CompiledFileSystem
from copy import deepcopy
from environment import GetAppVersion
from file_system import FileNotFoundError
from mock_file_system import MockFileSystem
from object_store_creator import ObjectStoreCreator
from test_file_system import TestFileSystem
from test_object_store import TestObjectStore
import unittest
_TEST_DATA = {
'404.html': '404.html contents',
'apps': {
'a11y.html': 'a11y.html contents',
'about_apps.html': 'about_apps.html contents',
'fakedir': {
'file.html': 'file.html contents'
},
'deepdir': {
'deepfile.html': 'deepfile.html contents',
'deeper': {
'deepest.html': 'deepest.html contents',
},
}
},
'extensions': {
'activeTab.html': 'activeTab.html contents',
'alarms.html': 'alarms.html contents'
}
}
identity = lambda _, x: x
def _GetTestCompiledFsCreator():
  '''Returns a function which creates CompiledFileSystem views of
  TestFileSystems backed by _TEST_DATA.
  '''
  # deepcopy so each creator gets its own pristine copy of the test tree;
  # some tests mutate the backing file system.
  return functools.partial(
      CompiledFileSystem.Factory(
          ObjectStoreCreator(start_empty=False,
                             store_type=TestObjectStore,
                             disable_wrappers=True),
          ).Create,
      TestFileSystem(deepcopy(_TEST_DATA)))
class CompiledFileSystemTest(unittest.TestCase):
  """Unit tests for CompiledFileSystem caching, namespacing and laziness."""
  def testPopulateNamespace(self):
    # Object-store namespaces must encode class, category and app version so
    # caches from different compilations never collide.
    def CheckNamespace(expected_file, expected_list, fs):
      self.assertEqual(expected_file, fs._file_object_store.namespace)
      self.assertEqual(expected_list, fs._list_object_store.namespace)
    compiled_fs_creator = _GetTestCompiledFsCreator()
    f = lambda x: x
    CheckNamespace(
        'class=CompiledFileSystem&'
        'category=CompiledFileSystemTest/TestFileSystem/file&'
        'app_version=%s' % GetAppVersion(),
        'class=CompiledFileSystem&'
        'category=CompiledFileSystemTest/TestFileSystem/list&'
        'app_version=%s' % GetAppVersion(),
        compiled_fs_creator(f, CompiledFileSystemTest))
    CheckNamespace(
        'class=CompiledFileSystem&'
        'category=CompiledFileSystemTest/TestFileSystem/foo/file&'
        'app_version=%s' % GetAppVersion(),
        'class=CompiledFileSystem&'
        'category=CompiledFileSystemTest/TestFileSystem/foo/list&'
        'app_version=%s' % GetAppVersion(),
        compiled_fs_creator(f, CompiledFileSystemTest, category='foo'))
  def testPopulateFromFile(self):
    # The compilation function receives (path, contents).
    def Sleepy(key, val):
      return '%s%s' % ('Z' * len(key), 'z' * len(val))
    compiled_fs = _GetTestCompiledFsCreator()(Sleepy, CompiledFileSystemTest)
    self.assertEqual('ZZZZZZZZzzzzzzzzzzzzzzzzz',
                     compiled_fs.GetFromFile('404.html').Get())
    self.assertEqual('ZZZZZZZZZZZZZZzzzzzzzzzzzzzzzzzz',
                     compiled_fs.GetFromFile('apps/a11y.html').Get())
    self.assertEqual('ZZZZZZZZZZZZZZZZZZZZZZzzzzzzzzzzzzzzzzzz',
                     compiled_fs.GetFromFile('apps/fakedir/file.html').Get())
  def testPopulateFromFileListing(self):
    def strip_ext(_, files):
      return [os.path.splitext(f)[0] for f in files]
    compiled_fs = _GetTestCompiledFsCreator()(strip_ext, CompiledFileSystemTest)
    expected_top_listing = [
      '404',
      'apps/a11y',
      'apps/about_apps',
      'apps/deepdir/deeper/deepest',
      'apps/deepdir/deepfile',
      'apps/fakedir/file',
      'extensions/activeTab',
      'extensions/alarms'
    ]
    self.assertEqual(expected_top_listing,
                     sorted(compiled_fs.GetFromFileListing('').Get()))
    expected_apps_listing = [
      'a11y',
      'about_apps',
      'deepdir/deeper/deepest',
      'deepdir/deepfile',
      'fakedir/file',
    ]
    self.assertEqual(expected_apps_listing,
                     sorted(compiled_fs.GetFromFileListing('apps/').Get()))
    self.assertEqual(['file',],
                     compiled_fs.GetFromFileListing('apps/fakedir/').Get())
    self.assertEqual(['deeper/deepest', 'deepfile'],
                     sorted(compiled_fs.GetFromFileListing(
                         'apps/deepdir/').Get()))
    self.assertEqual(['deepest'],
                     compiled_fs.GetFromFileListing(
                         'apps/deepdir/deeper/').Get())
  def testCaching(self):
    compiled_fs = _GetTestCompiledFsCreator()(Cache(identity),
                                              CompiledFileSystemTest)
    self.assertEqual('404.html contents',
                     compiled_fs.GetFromFile('404.html').Get())
    self.assertEqual(set(('file.html',)),
                     set(compiled_fs.GetFromFileListing('apps/fakedir/').Get()))
    # Mutating the backing data must not be visible until the file system's
    # stat changes (i.e. cached values are served).
    compiled_fs._file_system._path_values['404.html'] = 'boom'
    compiled_fs._file_system._path_values['apps/fakedir/'] = [
        'file.html', 'boom.html']
    self.assertEqual('404.html contents',
                     compiled_fs.GetFromFile('404.html').Get())
    self.assertEqual(set(('file.html',)),
                     set(compiled_fs.GetFromFileListing('apps/fakedir/').Get()))
    compiled_fs._file_system.IncrementStat()
    self.assertEqual('boom', compiled_fs.GetFromFile('404.html').Get())
    self.assertEqual(set(('file.html', 'boom.html')),
                     set(compiled_fs.GetFromFileListing('apps/fakedir/').Get()))
  def testFailures(self):
    compiled_fs = _GetTestCompiledFsCreator()(identity, CompiledFileSystemTest)
    self.assertRaises(FileNotFoundError,
                      compiled_fs.GetFromFile('405.html').Get)
    # TODO(kalman): would be nice to test this fails since apps/ is a dir.
    compiled_fs.GetFromFile('apps')
    #self.assertRaises(SomeError, compiled_fs.GetFromFile, 'apps/')
    self.assertRaises(FileNotFoundError,
                      compiled_fs.GetFromFileListing('nodir/').Get)
    # TODO(kalman): likewise, not a FileNotFoundError.
    self.assertRaises(FileNotFoundError,
                      compiled_fs.GetFromFileListing('404.html/').Get)
  def testCorrectFutureBehaviour(self):
    # Tests that the underlying FileSystem's Read Future has had Get() called
    # on it before the Future is resolved, but the underlying Future isn't
    # resolved until Get is.
    mock_fs = MockFileSystem(TestFileSystem(_TEST_DATA))
    compiled_fs = CompiledFileSystem.Factory(
        ObjectStoreCreator.ForTest()).Create(
            mock_fs, lambda path, contents: contents, type(self))
    self.assertTrue(*mock_fs.CheckAndReset())
    future = compiled_fs.GetFromFile('404.html')
    self.assertTrue(*mock_fs.CheckAndReset(stat_count=1, read_count=1))
    future.Get()
    self.assertTrue(*mock_fs.CheckAndReset(read_resolve_count=1))
    future = compiled_fs.GetFromFileListing('apps/')
    # Current behaviour is to have read=2 and read_resolve=1 because the first
    # level is read eagerly, then all of the second is read (in parallel). If
    # it weren't eager (and it may be worth experimenting with that) then it'd
    # be read=1 and read_resolve=0.
    self.assertTrue(*mock_fs.CheckAndReset(stat_count=1,
                                           read_count=2,
                                           read_resolve_count=1))
    future.Get()
    # It's doing 1 more level 'deeper' (already read 'fakedir' and 'deepdir'
    # though not resolved), so that's 1 more read/resolve + the resolve from
    # the first read.
    self.assertTrue(*mock_fs.CheckAndReset(read_count=1, read_resolve_count=2))
    # Even though the directory is 1 layer deep the caller has no way of
    # determining that ahead of time (though perhaps the API could give some
    # kind of clue, if we really cared).
    future = compiled_fs.GetFromFileListing('extensions/')
    self.assertTrue(*mock_fs.CheckAndReset(stat_count=1,
                                           read_count=1,
                                           read_resolve_count=1))
    future.Get()
    self.assertTrue(*mock_fs.CheckAndReset())
    # Similar configuration to the 'apps/' case but deeper.
    future = compiled_fs.GetFromFileListing('')
    self.assertTrue(*mock_fs.CheckAndReset(stat_count=1,
                                           read_count=2,
                                           read_resolve_count=1))
    future.Get()
    self.assertTrue(*mock_fs.CheckAndReset(read_count=2, read_resolve_count=3))
  def testSkipNotFound(self):
    mock_fs = MockFileSystem(TestFileSystem(_TEST_DATA))
    compiled_fs = CompiledFileSystem.Factory(
        ObjectStoreCreator.ForTest()).Create(
            mock_fs, Cache(lambda path, contents: contents), type(self))
    future = compiled_fs.GetFromFile('no_file', skip_not_found=True)
    # If the file doesn't exist, then the file system is not read.
    self.assertTrue(*mock_fs.CheckAndReset(read_count=1, stat_count=1))
    self.assertEqual(None, future.Get())
    self.assertTrue(*mock_fs.CheckAndReset(read_resolve_count=1))
    future = compiled_fs.GetFromFile('no_file', skip_not_found=True)
    self.assertTrue(*mock_fs.CheckAndReset(stat_count=1))
    self.assertEqual(None, future.Get())
    # The result for a non-existent file should still be cached.
    self.assertTrue(*mock_fs.CheckAndReset())
    future = compiled_fs.GetFromFile('no_file')
    self.assertRaises(FileNotFoundError, future.Get)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
|
ethers/pyethereum | refs/heads/master | pyethereum/peer.py | 1 | import time
import Queue
import socket
import logging
import signals
from stoppable import StoppableLoopThread
from packeter import packeter
from utils import big_endian_to_int as idec
from utils import recursive_int_to_big_endian
import rlp
import blocks
MAX_GET_CHAIN_ACCEPT_HASHES = 2048 # Maximum number of send hashes GetChain will accept
MAX_GET_CHAIN_SEND_HASHES = 2048 # Maximum number of hashes GetChain will ever send
MAX_GET_CHAIN_ASK_BLOCKS = 512 # Maximum number of blocks GetChain will ever ask for
MAX_GET_CHAIN_REQUEST_BLOCKS = 512 # Maximum number of requested blocks GetChain will accept
MAX_BLOCKS_SEND = MAX_GET_CHAIN_REQUEST_BLOCKS # Maximum number of blocks Blocks will ever send
MAX_BLOCKS_ACCEPTED = MAX_BLOCKS_SEND # Maximum number of blocks Blocks will ever accept
logger = logging.getLogger(__name__)
class Peer(StoppableLoopThread):
    """A connection to a single remote peer, run as its own thread.

    Each loop iteration (see ``loop_body``) flushes queued outgoing
    packets and drains incoming data, dispatching every decoded command
    to the matching ``_recv_<cmd>`` handler via reflection.
    """

    def __init__(self, connection, ip, port):
        """
        :param connection: the connected socket for this peer.
        :param ip: dotted-quad IP address of the remote end.
        :param port: the peer's listen port (None when the peer connected
            to us; replaced by the port announced in its Hello).
        """
        super(Peer, self).__init__()
        self._connection = connection

        assert ip.count('.') == 3
        self.ip = ip
        # None if peer was created in response to external connect
        self.port = port
        self.client_id = ''
        self.node_id = ''

        # Outgoing packets are queued here and flushed by _process_send.
        self.response_queue = Queue.Queue()
        self.hello_received = False
        self.hello_sent = False
        self.last_valid_packet_received = time.time()
        self.last_asked_for_peers = 0
        self.last_pinged = 0

        # Accumulates raw bytes until a complete packet can be decoded.
        self.recv_buffer = ''

        # connect signals

    def __repr__(self):
        return "<Peer(%s:%r)>" % (self.ip, self.port)

    def __str__(self):
        return "[{0}: {1}]".format(self.ip, self.port)

    def connection(self):
        """Return the underlying socket, or raise IOError once stopped."""
        if self.stopped():
            raise IOError("Connection was stopped")
        else:
            return self._connection

    def stop(self):
        """Stop the thread, then shut down and close the socket."""
        super(Peer, self).stop()

        # shut down
        try:
            self._connection.shutdown(socket.SHUT_RDWR)
        except socket.error as e:
            logger.debug(
                "shutting down failed {0} \"{1}\"".format(repr(self), str(e)))
        self._connection.close()

    def send_packet(self, response):
        """Queue an encoded packet; the peer's loop transmits it later."""
        logger.debug('sending packet to {0} >>> {1}'.format(
            self, response.encode('hex')))
        self.response_queue.put(response)

    def _process_send(self):
        '''
        Send one queued packet, if any.

        :return: number of bytes left unsent (0 on success or when the
            queue was empty; non-zero only if sending failed and the
            peer was stopped).
        '''
        # send packet
        try:
            packet = self.response_queue.get(block=False)
        except Queue.Empty:
            packet = ''

        while packet:
            try:
                n = self.connection().send(packet)
                # Drop the bytes that were actually transmitted.
                packet = packet[n:]
            except socket.error as e:
                logger.debug(
                    '{0}: send packet failed, {1}'
                    .format(self, str(e)))
                self.stop()
                break

        if packet:
            return len(packet)
        else:
            return 0

    def _process_recv(self):
        '''
        Drain the socket into recv_buffer and decode complete packets.

        :return: number of bytes that were in the receive buffer.
        '''
        # NOTE(review): recv() returning '' (orderly close by the remote
        # end) does not break this loop -- presumably the socket is
        # non-blocking so an idle connection raises socket.error instead;
        # confirm against the connection-setup code.
        while True:
            try:
                self.recv_buffer += self.connection().recv(2048)
            except socket.error:
                break
        length = len(self.recv_buffer)
        while self.recv_buffer:
            self._process_recv_buffer()
        return length

    def _process_recv_buffer(self):
        """Decode and dispatch one command from the receive buffer."""
        try:
            cmd, data, self.recv_buffer = packeter.load_cmd(self.recv_buffer)
        except Exception as e:
            # Undecodable data: discard the whole buffer and drop the peer.
            self.recv_buffer = ''
            logger.warn(e)
            return self.send_Disconnect(reason='Bad protocol')

        # good peer
        self.last_valid_packet_received = time.time()

        logger.debug('receive from {0} <<< cmd: {1}: data: {2}'.format(
            self, cmd,
            rlp.encode(recursive_int_to_big_endian(data)).encode('hex')
        ))

        # Dispatch to the handler named after the command, if implemented.
        func_name = "_recv_{0}".format(cmd)
        if not hasattr(self, func_name):
            logger.warn('unknown cmd \'{0}\''.format(func_name))
            return
        getattr(self, func_name)(data)

    def send_Hello(self):
        self.send_packet(packeter.dump_Hello())
        self.hello_sent = True

    def _recv_Hello(self, data):
        """Handle the handshake: validate compatibility, record identity."""
        # check compatibility
        peer_protocol_version, network_id, client_id = idec(data[0]), idec(data[1]), data[2]
        capabilities, listen_port, node_id = idec(data[3]), idec(data[4]), data[5]
        logger.debug('received Hello %s V:%r N:%r C:%r P:%r I:%s', client_id,
                     peer_protocol_version, network_id, capabilities, listen_port,
                     node_id.encode('hex'))

        if peer_protocol_version != packeter.PROTOCOL_VERSION:
            return self.send_Disconnect(
                reason='Incompatible network protocols')

        if network_id != packeter.NETWORK_ID:
            return self.send_Disconnect(reason='Wrong genesis block')

        # add to known peers list in handshake signal
        self.hello_received = True
        self.client_id = client_id
        self.node_id = node_id
        self.port = listen_port  # replace connection port with listen port

        # reply with hello if not send
        if not self.hello_sent:
            self.send_Hello()
        signals.peer_handshake_success.send(sender=Peer, peer=self)

    def send_Ping(self):
        self.send_packet(packeter.dump_Ping())
        self.last_pinged = time.time()

    def _recv_Ping(self, data):
        self.send_Pong()

    def send_Pong(self):
        self.send_packet(packeter.dump_Pong())

    def _recv_Pong(self, data):
        pass

    # Disconnect reasons after which this peer should not be re-contacted.
    reasons_to_forget = ('Bad protocol',
                         'Incompatible network protocols',
                         'Wrong genesis block')

    def send_Disconnect(self, reason=None):
        """Send a Disconnect packet and signal teardown of this peer."""
        logger.info('disconnecting {0}, reason: {1}'.format(
            str(self), reason or ''))
        self.send_packet(packeter.dump_Disconnect(reason=reason))
        # end connection
        # Give the send loop a chance to flush the Disconnect packet
        # before the connection is torn down.
        time.sleep(2)
        forget = reason in self.reasons_to_forget
        signals.peer_disconnect_requested.send(Peer, peer=self, forget=forget)

    def _recv_Disconnect(self, data):
        if len(data):
            reason = packeter.disconnect_reasons_map_by_id[idec(data[0])]
            logger.info('{0} sent disconnect, {1} '.format(repr(self), reason))
            forget = reason in self.reasons_to_forget
        else:
            forget = None
        signals.peer_disconnect_requested.send(
            sender=Peer, peer=self, forget=forget)

    def send_GetPeers(self):
        self.send_packet(packeter.dump_GetPeers())

    def _recv_GetPeers(self, data):
        signals.getpeers_received.send(sender=Peer, peer=self)

    def send_Peers(self, peers):
        if peers:
            packet = packeter.dump_Peers(peers)
            self.send_packet(packet)

    def _recv_Peers(self, data):
        # Each entry is (4-byte packed IP, big-endian port, peer id).
        addresses = []
        for ip, port, pid in data:
            assert len(ip) == 4
            ip = '.'.join(str(ord(b)) for b in ip)
            port = idec(port)
            logger.debug('received peer address: {0}:{1}'.format(ip, port))
            addresses.append([ip, port, pid])
        signals.peer_addresses_received.send(sender=Peer, addresses=addresses)

    def send_GetTransactions(self):
        logger.info('asking for transactions')
        self.send_packet(packeter.dump_GetTransactions())

    def _recv_GetTransactions(self, data):
        logger.info('asking for transactions')
        signals.gettransactions_received.send(sender=Peer, peer=self)

    def send_Transactions(self, transactions):
        self.send_packet(packeter.dump_Transactions(transactions))

    def _recv_Transactions(self, data):
        logger.info('received transactions #%d', len(data))
        signals.remote_transactions_received.send(
            sender=Peer, transactions=data)

    def send_Blocks(self, blocks):
        assert len(blocks) <= MAX_BLOCKS_SEND
        self.send_packet(packeter.dump_Blocks(blocks))

    def _recv_Blocks(self, data):
        # open('raw_remote_blocks_hex.txt', 'a').write(rlp.encode(data).encode('hex') + '\n') # LOG line
        transient_blocks = [blocks.TransientBlock(rlp.encode(b)) for b in data] # FIXME
        # NOTE(review): an oversized batch is only logged, not rejected --
        # the blocks are still forwarded below; confirm this is intended.
        if len(transient_blocks) > MAX_BLOCKS_ACCEPTED:
            logger.warn('Peer sending too many blocks %d', len(transient_blocks))
        signals.remote_blocks_received.send(
            sender=Peer, peer=self, transient_blocks=transient_blocks)

    def send_GetChain(self, parents=[], count=1):
        # NOTE(review): mutable default argument; harmless only while the
        # list is never mutated here -- confirm callers do not rely on it.
        assert len(parents) <= MAX_GET_CHAIN_SEND_HASHES
        assert count <= MAX_GET_CHAIN_ASK_BLOCKS
        self.send_packet(packeter.dump_GetChain(parents, count))

    def _recv_GetChain(self, data):
        """
        [0x14, Parent1, Parent2, ..., ParentN, Count]
        Request the peer to send Count (to be interpreted as an integer) blocks
        in the current canonical block chain that are children of Parent1
        (to be interpreted as a SHA3 block hash). If Parent1 is not present in
        the block chain, it should instead act as if the request were for
        Parent2 &c. through to ParentN. If the designated parent is the present
        block chain head, an empty reply should be sent. If none of the parents
        are in the current canonical block chain, then NotInChain should be
        sent along with ParentN (i.e. the last Parent in the parents list).
        If no parents are passed, then reply need not be made.
        """
        # The last list element is the requested count; the rest are hashes.
        block_hashes = data[:-1]
        count = idec(data[-1])
        if count > MAX_GET_CHAIN_REQUEST_BLOCKS:
            logger.warn('GetChain: Peer asking for too many blocks %d', count)
        if len(block_hashes) > MAX_GET_CHAIN_ACCEPT_HASHES:
            logger.warn('GetChain: Peer sending too many block hashes %d', len(block_hashes))
        signals.local_chain_requested.send(
            sender=Peer, peer=self, block_hashes=block_hashes, count=count)

    def send_NotInChain(self, block_hash):
        self.send_packet(packeter.dump_NotInChain(block_hash))

    def _recv_NotInChain(self, data):
        pass

    def loop_body(self):
        """One scheduler tick: flush sends, drain receives, idle briefly."""
        try:
            send_size = self._process_send()
            recv_size = self._process_recv()
        except IOError:
            self.stop()
            return
        # pause
        if not (send_size or recv_size):
            time.sleep(0.01)
|
fentas/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/QueueStatusServer/handlers/__init__.py | 143 | # Required for Python to search this directory for module files
from handlers.updatebase import UpdateBase
|
eaobservatory/hedwig | refs/heads/master | lib/hedwig/db/part/review.py | 1 | # Copyright (C) 2015-2020 East Asian Observatory
# All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful,but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,51 Franklin
# Street, Fifth Floor, Boston, MA 02110-1301, USA
from __future__ import absolute_import, division, print_function, \
unicode_literals
from datetime import datetime
from sqlalchemy.sql import select
from sqlalchemy.sql.expression import and_, case, not_
from sqlalchemy.sql.functions import count
from ...error import ConsistencyError, Error, FormattedError, \
NoSuchRecord, UserError
from ...type.collection import GroupMemberCollection, ResultCollection, \
ReviewerCollection, ReviewerAcceptanceCollection, ReviewDeadlineCollection, \
ReviewFigureCollection
from ...type.enum import Assessment, FormatType, GroupType, ReviewState
from ...type.simple import GroupMember, Reviewer, ReviewerAcceptance, \
ReviewDeadline, ReviewFigureInfo
from ...util import is_list_like
from ..meta import call, decision, group_member, \
institution, invitation, person, \
proposal, queue, \
review, reviewer, reviewer_acceptance, review_deadline, \
review_fig, review_fig_link, review_fig_preview, review_fig_thumbnail
class ReviewPart(object):
def add_group_member(self, queue_id, group_type, person_id,
_conn=None, _test_skip_check=False):
if not GroupType.is_valid(group_type):
raise Error('invalid group type')
with self._transaction(_conn=_conn) as conn:
if not _test_skip_check:
if not self._exists_id(conn, queue, queue_id):
raise ConsistencyError('queue does not exist with id={}',
queue_id)
if not self._exists_id(conn, person, person_id):
raise ConsistencyError('person does not exist with id={}',
person_id)
result = conn.execute(group_member.insert().values({
group_member.c.queue_id: queue_id,
group_member.c.group_type: group_type,
group_member.c.person_id: person_id,
}))
return result.inserted_primary_key[0]
def add_reviewer(self, role_class, proposal_id, person_id, role,
_test_skip_check=False, _conn=None):
try:
role_info = role_class.get_info(role)
except KeyError:
raise Error('invalid reviewer role')
with self._transaction(_conn=_conn) as conn:
if not _test_skip_check:
if not self._exists_id(conn, proposal, proposal_id):
raise ConsistencyError(
'proposal does not exist with id={}', proposal_id)
if not self._exists_id(conn, person, person_id):
raise ConsistencyError(
'person does not exist with id={}', person_id)
# Do not allow the "unique" check to be skipped because there is
# no database constraint for this. (Which is because some roles
# are unique and others are not.)
if role_info.unique:
if self._exists_reviewer(conn, proposal_id, role):
raise UserError(
'There is already a "{}" reviewer for this proposal.',
role_info.name)
result = conn.execute(reviewer.insert().values({
reviewer.c.proposal_id: proposal_id,
reviewer.c.person_id: person_id,
reviewer.c.role: role,
}))
return result.inserted_primary_key[0]
def add_reviewer_acceptance(
self, role_class, proposal_id, person_id, role,
accepted, text, format_, _conn=None):
if not role_class.is_accepted_review(role):
raise Error('This reviewer role does not require acceptance.')
if text is None:
raise Error('Acceptance explanation text not specified.')
if not format_:
raise UserError('Text format not specified.')
if not FormatType.is_valid(format_):
raise UserError('Text format not recognised.')
values = {
reviewer_acceptance.c.proposal_id: proposal_id,
reviewer_acceptance.c.person_id: person_id,
reviewer_acceptance.c.role: role,
reviewer_acceptance.c.accepted: accepted,
reviewer_acceptance.c.text: text,
reviewer_acceptance.c.format: format_,
reviewer_acceptance.c.date: datetime.utcnow(),
}
with self._transaction(_conn=_conn) as conn:
if not self._exists_reviewer(conn, proposal_id, role, person_id):
raise ConsistencyError(
'reviewer does not exist for prop {} person {} role {}',
proposal_id, person_id, role)
result = conn.execute(reviewer_acceptance.insert().values(values))
return result.inserted_primary_key[0]
    def add_review_figure(
            self, reviewer_id,
            type_, figure, caption, filename, uploader_person_id,
            _test_skip_check=False):
        """
        Attach a figure to a review.

        Delegates to the generic figure-handling helper, supplying the
        review-specific figure and link tables.
        """

        return self._add_figure(
            review_fig, review_fig_link, review_fig_link.c.reviewer_id,
            reviewer_id, reviewer,
            type_, figure, caption, filename, uploader_person_id,
            _test_skip_check=_test_skip_check)
def delete_reviewer(self, reviewer_id=None,
proposal_id=None, person_id=None, role=None,
delete_review=False, _conn=None):
"""
Delete a reviewer record from the database.
This can, optionally, also delete any associated review. This
is because, in the database, review.reviewer references
reviewer.id with "ondelete" set to restrict. (We don't want
to accidentally delete reviews when working with th reviewer table.)
The option allows the deletion to be "cascaded" manually when
necessary.
Either selects the reviewer to delete by the "reviewer_id"
argument, if it is specified, or, otherwise, by the proposal_id,
person_id and role, all of whic must be specified together.
"""
with self._transaction(_conn=_conn) as conn:
if delete_review:
result = conn.execute(review.delete().where(
review.c.reviewer_id == reviewer_id))
if result.rowcount > 1:
raise ConsistencyError(
'multiple rows matched deleting reviews by {}',
reviewer_id)
stmt = reviewer.delete()
if reviewer_id is not None:
stmt = stmt.where(reviewer.c.id == reviewer_id)
elif proposal_id is None or person_id is None or role is None:
raise Error('Either reviewer_id or proposal/person/role '
'must be specified.')
else:
stmt = stmt.where(and_(reviewer.c.proposal_id == proposal_id,
reviewer.c.person_id == person_id,
reviewer.c.role == role))
result = conn.execute(stmt)
if result.rowcount != 1:
raise ConsistencyError(
'no row matched deleting reviewer {}', reviewer_id)
def delete_reviewer_acceptance(self, reviewer_acceptance_id, _conn=None):
"""
Delete a reviewer acceptance explanation record.
"""
stmt = reviewer_acceptance.delete().where(
reviewer_acceptance.c.id == reviewer_acceptance_id)
with self._transaction(_conn=_conn) as conn:
result = conn.execute(stmt)
if result.rowcount != 1:
raise ConsistencyError(
'no row matched deleting reviewer acceptance {}',
reviewer_acceptance_id)
def delete_review_figure(self, reviewer_id, id_):
where_extra = []
if reviewer_id is not None:
where_extra.append(review_fig_link.c.reviewer_id == reviewer_id)
return self._delete_figure(
review_fig, review_fig_link, id_, where_extra=where_extra)
def get_review_figure(self, reviewer_id, link_id, fig_id=None, md5sum=None):
where_extra = []
if reviewer_id is not None:
where_extra.append(review_fig_link.c.reviewer_id == reviewer_id)
return self._get_figure(
review_fig, review_fig_link, link_id, fig_id, md5sum,
where_extra=where_extra)
def get_review_figure_preview(
self, reviewer_id, link_id, fig_id=None, md5sum=None):
where_extra = []
if reviewer_id is not None:
where_extra.append(review_fig_link.c.reviewer_id == reviewer_id)
return self._get_figure_alternate(
review_fig, review_fig_link, review_fig_preview.c.preview,
link_id, fig_id, md5sum, where_extra=where_extra)
def get_review_figure_thumbnail(
self, reviewer_id, link_id, fig_id=None, md5sum=None):
where_extra = []
if reviewer_id is not None:
where_extra.append(review_fig_link.c.reviewer_id == reviewer_id)
return self._get_figure_alternate(
review_fig, review_fig_link, review_fig_thumbnail.c.thumbnail,
link_id, fig_id, md5sum, where_extra=where_extra)
def multiple_reviewer_update(self, role_class, remove=None, add=None):
"""
Perform multiple reviewer updates.
This is so that multiple updates to the reviewer assignment
table can be prepared and then carried out (here) in a single
transaction. The removals are performed first to avoid
triggering the uniqueness constraint when the reviewer for
a "unique" role is being changed.
The "role_class" for the relevant facility must be provided -- this
will be passed to "add_reviewer".
"remove" and "add" are lists of kwargs dictionaries to be passed
to "delete_reviewer" and "add_reviewer" respectively.
"""
with self._transaction() as conn:
if remove is not None:
for kwargs in remove:
self.delete_reviewer(_conn=conn, **kwargs)
if add is not None:
for kwargs in add:
self.add_reviewer(role_class=role_class,
_conn=conn, **kwargs)
def search_group_member(self, queue_id=None, group_type=None,
person_id=None, facility_id=None,
group_member_id=None,
with_person=False, with_queue=False, _conn=None):
select_from = group_member.join(queue)
select_columns = [
group_member,
queue.c.facility_id,
]
default = {}
if with_person:
select_columns.extend([
group_member,
person.c.name.label('person_name'),
person.c.public.label('person_public'),
(person.c.user_id.isnot(None)).label('person_registered'),
person.c.institution_id,
institution.c.name.label('institution_name'),
institution.c.department.label('institution_department'),
institution.c.organization.label('institution_organization'),
institution.c.country.label('institution_country'),
])
select_from = select_from.join(person).outerjoin(institution)
else:
default.update({
'person_name': None,
'person_public': None,
'person_registered': None,
'institution_id': None,
'institution_name': None,
'institution_department': None,
'institution_organization': None,
'institution_country': None,
})
if with_queue:
select_columns.extend([
queue.c.code.label('queue_code'),
queue.c.name.label('queue_name'),
])
else:
default.update({
'queue_code': None,
'queue_name': None,
})
stmt = select(select_columns).select_from(select_from)
if queue_id is not None:
stmt = stmt.where(group_member.c.queue_id == queue_id)
if group_type is not None:
if is_list_like(group_type):
stmt = stmt.where(group_member.c.group_type.in_(group_type))
else:
stmt = stmt.where(group_member.c.group_type == group_type)
if person_id is not None:
stmt = stmt.where(group_member.c.person_id == person_id)
if facility_id is not None:
stmt = stmt.where(queue.c.facility_id == facility_id)
if group_member_id is not None:
stmt = stmt.where(group_member.c.id == group_member_id)
if with_person:
stmt = stmt.order_by(person.c.name.asc())
else:
stmt = stmt.order_by(group_member.c.id.asc())
ans = GroupMemberCollection()
with self._transaction(_conn=_conn) as conn:
for row in conn.execute(stmt):
if default:
values = default.copy()
values.update(**row)
else:
values = row
ans[row['id']] = GroupMember(**values)
return ans
    def search_reviewer(self,
                        proposal_id=None, role=None, reviewer_id=None,
                        person_id=None, review_state=None,
                        call_id=None, queue_id=None,
                        proposal_state=None, institution_id=None,
                        notified=None, accepted=(),
                        with_review=False, with_review_text=False,
                        with_review_note=False,
                        with_invitation=False,
                        with_acceptance=False,
                        _conn=None):
        """
        Search for reviewer (role assignment) records.

        Basic person and institution information for each reviewer is
        always selected; the "with_*" options control whether
        invitation, review and acceptance details are also included
        (attributes for de-selected options are filled with None).

        The "accepted" filter uses () as its "no filter" sentinel since
        None is itself a meaningful filter value.  Most filters accept
        either a single value or a list-like of values; a list-like
        proposal_id is handled by iterating over split queries.

        :return: a ReviewerCollection ordered by role, person name and
            reviewer id.
        """

        select_columns = [
            reviewer,
            person.c.name.label('person_name'),
            person.c.public.label('person_public'),
            (person.c.user_id.isnot(None)).label('person_registered'),
            person.c.user_id.label('user_id'),
            person.c.institution_id,
            institution.c.name.label('institution_name'),
            institution.c.department.label('institution_department'),
            institution.c.organization.label('institution_organization'),
            institution.c.country.label('institution_country'),
        ]

        select_from = reviewer.join(person).outerjoin(institution)

        iter_field = None
        iter_list = None

        # NOTE(review): 'review_extra' is defaulted but never selected
        # here -- presumably populated by facility-specific overrides;
        # confirm against the subclasses.
        default = {'review_extra': None}

        if with_invitation:
            select_columns.extend((
                invitation.c.token.label('invitation_token'),
                invitation.c.expiry.label('invitation_expiry'),
            ))

            select_from = select_from.outerjoin(invitation)

        else:
            default.update({
                'invitation_token': None,
                'invitation_expiry': None,
            })

        if with_review:
            # The effective state is computed (rejected invitations take
            # precedence) rather than read straight from the table.
            select_columns.append(
                self._expr_review_state().label('review_state'))

            select_columns.extend((x.label('review_{}'.format(x.name))
                                   for x in review.columns
                                   if x not in (review.c.reviewer_id,
                                                review.c.text,
                                                review.c.note,
                                                review.c.state)))

            if with_review_text:
                select_columns.append(review.c.text.label('review_text'))
            else:
                default['review_text'] = None

            if with_review_note:
                select_columns.append(review.c.note.label('review_note'))
            else:
                default['review_note'] = None

        else:
            default.update({
                'review_{}'.format(x.name): None
                for x in review.columns if x != review.c.reviewer_id})

        # The review table is also needed when filtering by state.
        if (with_review or (review_state is not None)):
            select_from = select_from.outerjoin(review)

        if ((call_id is not None)
                or (queue_id is not None)
                or (proposal_state is not None)):
            select_from = select_from.join(proposal)

            if queue_id is not None:
                select_from = select_from.join(call)

        if with_acceptance:
            select_columns.extend((
                reviewer_acceptance.c.accepted.label('acceptance_accepted'),
                reviewer_acceptance.c.text.label('acceptance_text'),
                reviewer_acceptance.c.format.label('acceptance_format'),
                reviewer_acceptance.c.date.label('acceptance_date'),
            ))

            # Acceptance records are keyed by proposal/person/role rather
            # than by reviewer id.
            select_from = select_from.outerjoin(reviewer_acceptance, and_(
                reviewer.c.proposal_id == reviewer_acceptance.c.proposal_id,
                reviewer.c.person_id == reviewer_acceptance.c.person_id,
                reviewer.c.role == reviewer_acceptance.c.role,
            ))

        else:
            default.update({
                'acceptance_accepted': None,
                'acceptance_text': None,
                'acceptance_format': None,
                'acceptance_date': None,
            })

        stmt = select(select_columns).select_from(select_from)

        if proposal_id is not None:
            if is_list_like(proposal_id):
                assert iter_field is None
                iter_field = reviewer.c.proposal_id
                iter_list = proposal_id
            else:
                stmt = stmt.where(reviewer.c.proposal_id == proposal_id)

        if role is not None:
            if is_list_like(role):
                stmt = stmt.where(reviewer.c.role.in_(role))
            else:
                stmt = stmt.where(reviewer.c.role == role)

        if reviewer_id is not None:
            stmt = stmt.where(reviewer.c.id == reviewer_id)

        if person_id is not None:
            stmt = stmt.where(reviewer.c.person_id == person_id)

        if review_state is not None:
            if is_list_like(review_state):
                stmt = stmt.where(self._expr_review_state().in_(review_state))
            else:
                stmt = stmt.where(self._expr_review_state() == review_state)

        if call_id is not None:
            if is_list_like(call_id):
                stmt = stmt.where(proposal.c.call_id.in_(call_id))
            else:
                stmt = stmt.where(proposal.c.call_id == call_id)

        if queue_id is not None:
            if is_list_like(queue_id):
                stmt = stmt.where(call.c.queue_id.in_(queue_id))
            else:
                stmt = stmt.where(call.c.queue_id == queue_id)

        if proposal_state is not None:
            if is_list_like(proposal_state):
                stmt = stmt.where(proposal.c.state.in_(proposal_state))
            else:
                stmt = stmt.where(proposal.c.state == proposal_state)

        if institution_id is not None:
            stmt = stmt.where(person.c.institution_id == institution_id)

        # () is the "no filter" sentinel: None means "filter for null".
        if accepted != ():
            if accepted is None:
                stmt = stmt.where(reviewer.c.accepted.is_(None))
            elif accepted:
                stmt = stmt.where(reviewer.c.accepted)
            else:
                stmt = stmt.where(not_(reviewer.c.accepted))

        if notified is not None:
            if notified:
                stmt = stmt.where(reviewer.c.notified)
            else:
                stmt = stmt.where(not_(reviewer.c.notified))

        ans = ReviewerCollection()

        with self._transaction(_conn=_conn) as conn:
            for iter_stmt in self._iter_stmt(stmt, iter_field, iter_list):
                for row in conn.execute(iter_stmt.order_by(
                        reviewer.c.role, person.c.name, reviewer.c.id)):
                    values = default.copy()
                    values.update(**row)
                    ans[row['id']] = Reviewer(**values)

        return ans
def _expr_review_state(self):
return case([
(not_(reviewer.c.accepted), ReviewState.REJECTED),
(review.c.reviewer_id.isnot(None), review.c.state),
], else_=ReviewState.NOT_DONE)
    def search_reviewer_acceptance(
            self, reviewer_acceptance_id=None,
            proposal_id=None, person_id=None, role=None,
            with_text=False,
            _conn=None):
        """
        Search for reviewer acceptance records.

        The explanation text (and its format) is only selected when
        "with_text" is given; otherwise those attributes are None.
        A list-like proposal_id or person_id is handled by iterating
        over split queries (only one of the two may be list-like in a
        single call).

        :return: a ReviewerAcceptanceCollection indexed by record id.
        """

        select_columns = [
            reviewer_acceptance.c.id,
            reviewer_acceptance.c.proposal_id,
            reviewer_acceptance.c.person_id,
            reviewer_acceptance.c.role,
            reviewer_acceptance.c.accepted,
            reviewer_acceptance.c.date,
        ]

        # Default values for attributes not included in the query.
        default = {}

        if with_text:
            select_columns.extend((
                reviewer_acceptance.c.text,
                reviewer_acceptance.c.format,
            ))
        else:
            default.update({
                'text': None,
                'format': None,
            })

        stmt = select(select_columns)

        iter_field = None
        iter_list = None

        if reviewer_acceptance_id is not None:
            stmt = stmt.where(reviewer_acceptance.c.id == reviewer_acceptance_id)

        if proposal_id is not None:
            if is_list_like(proposal_id):
                assert iter_field is None
                iter_field = reviewer_acceptance.c.proposal_id
                iter_list = proposal_id
            else:
                stmt = stmt.where(
                    reviewer_acceptance.c.proposal_id == proposal_id)

        if person_id is not None:
            if is_list_like(person_id):
                assert iter_field is None
                iter_field = reviewer_acceptance.c.person_id
                iter_list = person_id
            else:
                stmt = stmt.where(reviewer_acceptance.c.person_id == person_id)

        if role is not None:
            if is_list_like(role):
                stmt = stmt.where(reviewer_acceptance.c.role.in_(role))
            else:
                stmt = stmt.where(reviewer_acceptance.c.role == role)

        ans = ReviewerAcceptanceCollection()

        with self._transaction(_conn=_conn) as conn:
            for iter_stmt in self._iter_stmt(stmt, iter_field, iter_list):
                for row in conn.execute(iter_stmt.order_by(
                        reviewer_acceptance.c.id)):
                    values = default.copy()
                    values.update(**row)
                    ans[row['id']] = ReviewerAcceptance(**values)

        return ans
def search_review_deadline(
self, call_id=None, role=None, _conn=None):
"""
Search for review deadlines.
"""
stmt = review_deadline.select()
iter_field = None
iter_list = None
if call_id is not None:
if is_list_like(call_id):
assert iter_field is None
iter_field = review_deadline.c.call_id
iter_list = call_id
else:
stmt = stmt.where(review_deadline.c.call_id == call_id)
if role is not None:
if is_list_like(role):
stmt = stmt.where(review_deadline.c.role.in_(role))
else:
stmt = stmt.where(review_deadline.c.role == role)
ans = ReviewDeadlineCollection()
with self._transaction(_conn=_conn) as conn:
for iter_stmt in self._iter_stmt(stmt, iter_field, iter_list):
for row in conn.execute(iter_stmt):
ans[row['id']] = ReviewDeadline(**row)
return ans
    def search_review_figure(
            self, reviewer_id=None, state=None, link_id=None, fig_id=None,
            with_caption=False, with_uploader_name=False,
            with_has_preview=False, order_by_date=False,
            no_link=False):
        """
        Search for figures attached to reviews.

        Delegates to the generic figure search helper.  When "no_link"
        is given, figures are searched without joining the link table
        (so "reviewer_id" may not also be specified and the result's
        reviewer_id attribute is None).

        :raises Error: if reviewer_id is combined with no_link.
        """

        where_extra = []
        select_extra = []
        default_extra = {}

        if reviewer_id is not None:
            if no_link:
                raise Error('reviewer_id specified with no_link')
            where_extra.append(review_fig_link.c.reviewer_id == reviewer_id)

        if no_link:
            default_extra.update({
                'reviewer_id': None,
            })
        else:
            select_extra.extend([
                review_fig_link.c.reviewer_id,
            ])

        return self._search_figure(
            review_fig, (None if no_link else review_fig_link),
            ReviewFigureInfo, ReviewFigureCollection,
            state, link_id, fig_id,
            with_caption, with_uploader_name, order_by_date,
            with_has_preview_table=(
                review_fig_preview if with_has_preview else None),
            select_extra=select_extra, default_extra=default_extra,
            where_extra=where_extra)
def set_decision(self, proposal_id, accept=(), exempt=None, ready=None,
note=None, note_format=None):
values = {}
if accept != ():
values[decision.c.accept] = accept
if exempt is not None:
values[decision.c.exempt] = exempt
if ready is not None:
values[decision.c.ready] = ready
if note is not None:
if not note_format:
raise UserError('Note format not specified.')
if not FormatType.is_valid(note_format):
raise UserError('Note format not recognised.')
values[decision.c.note] = note
values[decision.c.note_format] = note_format
if not values:
raise Error('no decision update specified')
decision_id = None
with self._transaction() as conn:
proposal = self.get_proposal(
facility_id=None, proposal_id=proposal_id,
with_decision=True, _conn=conn)
if proposal.has_decision:
result = conn.execute(decision.update().where(
decision.c.proposal_id == proposal_id
).values(values))
if result.rowcount != 1:
raise ConsistencyError(
'no rows matched updating decision for proposal {}',
proposal_id)
else:
values.update({
decision.c.proposal_id: proposal_id,
})
result = conn.execute(decision.insert().values(values))
decision_id = result.inserted_primary_key[0]
return decision_id
def set_review(self, role_class, reviewer_id, text, format_,
assessment, rating, weight,
note, note_format, note_public,
state):
if text is not None:
if not format_:
raise UserError('Text format not specified.')
if not FormatType.is_valid(format_):
raise UserError('Text format not recognised.')
if note is not None:
if not note_format:
raise UserError('Note format not specified.')
if not FormatType.is_valid(note_format):
raise UserError('Note format not recognised.')
if assessment is not None:
if not Assessment.is_valid(assessment):
raise UserError('Assessment value not recognised.')
if not (ReviewState.is_valid(state) and ReviewState.is_present(state)):
raise Error('invalid review state')
state_done = (state == ReviewState.DONE)
values = {
review.c.text: text,
review.c.format: (None if text is None else format_),
review.c.assessment: assessment,
review.c.rating: rating,
review.c.weight: weight,
review.c.edited: datetime.utcnow(),
review.c.note: note,
review.c.note_format: (None if note is None else note_format),
review.c.note_public: ((note is not None) and note_public),
review.c.state: state,
}
with self._transaction() as conn:
# Find out what type of review this is so that we can
# determine which attributes are appropriate.
reviewer = self.search_reviewer(
reviewer_id=reviewer_id, _conn=conn).get_single()
role_info = role_class.get_info(reviewer.role)
attr_values = {k.name: v for (k, v) in values.items()}
for attr in ('text', 'assessment', 'rating', 'weight', 'note'):
if getattr(role_info, attr):
# Check for missing attributes only if review is done.
if state_done and (attr_values[attr] is None):
raise FormattedError(
'The {} should be specified.', attr)
else:
if attr_values[attr] is not None:
raise FormattedError(
'The {} should not be specified.', attr)
# Check if the review already exists.
already_exists = self._exists_review(conn, reviewer_id=reviewer_id)
# Perform the insert/update.
if already_exists:
result = conn.execute(review.update().where(
review.c.reviewer_id == reviewer_id
).values(values))
if result.rowcount != 1:
raise ConsistencyError(
'no rows matched updating review {}', reviewer_id)
else:
values.update({
review.c.reviewer_id: reviewer_id,
})
result = conn.execute(review.insert().values(values))
    def set_review_figure_preview(self, fig_id, preview):
        """Store (insert or update) the preview image of a review figure."""
        self._set_figure_alternate(
            review_fig_preview.c.preview, fig_id, preview)
    def set_review_figure_thumbnail(self, fig_id, thumbnail):
        """Store (insert or update) the thumbnail image of a review figure."""
        self._set_figure_alternate(
            review_fig_thumbnail.c.thumbnail, fig_id, thumbnail)
def sync_call_review_deadline(
self, role_class, call_id, records, _conn=None):
"""
Update the review deadlines for a call.
"""
records.validate(role_class)
with self._transaction(_conn=_conn) as conn:
if not self._exists_id(conn, call, call_id):
raise ConsistencyError(
'call does not exist with id={}', call_id)
return self._sync_records(
conn, review_deadline,
key_column=review_deadline.c.call_id, key_value=call_id,
records=records, unique_columns=(review_deadline.c.role,))
    def sync_group_member(self, queue_id, group_type, records):
        """
        Update the member records of the given group for the given queue.
        Currently this just allows removing members of the group,
        but could be extended if group members gain extra attributes.
        (E.g. the chair of a group representing a committee.)

        :raises Error: if the group type is not valid.
        :raises ConsistencyError: if the queue does not exist.
        """
        if not GroupType.is_valid(group_type):
            raise Error('invalid group type')
        with self._transaction() as conn:
            if not self._exists_id(conn, queue, queue_id):
                raise ConsistencyError('queue does not exist with id={}',
                                       queue_id)
            # No updatable columns and additions are forbidden, so this
            # sync can only delete members absent from `records`.
            self._sync_records(
                conn, group_member,
                key_column=(group_member.c.queue_id,
                            group_member.c.group_type),
                key_value=(queue_id, group_type),
                records=records,
                update_columns=(),
                forbid_add=True)
    def sync_review_figure(self, reviewer_id, records):
        """
        Update the figures for a review.
        Currently only deleting figures and changing the sort order
        is supported.

        :return: tuple (n_insert, n_update, n_delete) of affected row counts.
        :raises ConsistencyError: if the reviewer does not exist.
        """
        records.ensure_sort_order()
        with self._transaction() as conn:
            if not self._exists_id(conn, reviewer, reviewer_id):
                raise ConsistencyError(
                    'reviewer does not exist with id={}', reviewer_id)
            (n_insert, n_update, n_delete) = self._sync_records(
                conn, review_fig_link, review_fig_link.c.reviewer_id, reviewer_id,
                records, update_columns=(
                    review_fig_link.c.sort_order,
                ), forbid_add=True)
            # Deleting link rows can leave figure records with no remaining
            # references; clean up any such orphaned figures.
            if n_delete:
                self._remove_orphan_records(
                    conn, review_fig, review_fig_link.c.fig_id)
        return (n_insert, n_update, n_delete)
def update_reviewer(
self, role_class, reviewer_id,
notified=None, accepted=()):
"""
Update the status information of a reviewer record.
"""
with self._transaction() as conn:
try:
reviewer_record = self.search_reviewer(
reviewer_id=reviewer_id, _conn=conn).get_single()
except NoSuchRecord:
raise ConsistencyError(
'reviewer does not exist with id={}', reviewer_id)
try:
role_info = role_class.get_info(reviewer_record.role)
except KeyError:
raise Error('reviewer has invalid role')
values = {}
if notified is not None:
if role_info.review_group is None:
raise Error('reviewer role is not assigned')
values['notified'] = notified
if accepted != ():
if not role_info.accept:
raise Error('reviewer role is not accepted')
values['accepted'] = accepted
if not values:
raise Error('no reviewer updates specified')
result = conn.execute(reviewer.update().where(
reviewer.c.id == reviewer_id
).values(values))
if result.rowcount != 1:
raise ConsistencyError(
'no rows matched updating reviewer with id={}',
reveiwer_id)
    def update_reviewer_acceptance(
            self, reviewer_acceptance_id, accepted=None, text=None, format_=None,
            _conn=None):
        """
        Update an existing reviewer acceptance record.

        All of `accepted`, `text` and `format_` must be provided; the
        record's date is reset to the current UTC time.

        :raises Error: if the accepted flag or explanation text is missing.
        :raises UserError: if the text format is missing or not recognised.
        :raises ConsistencyError: if no record matched the given ID.
        """
        if accepted is None:
            raise Error('Accepted flag not specified.')
        if text is None:
            raise Error('Acceptance explanation text not specified.')
        if not format_:
            raise UserError('Text format not specified.')
        if not FormatType.is_valid(format_):
            raise UserError('Text format not recognised.')
        values = {
            reviewer_acceptance.c.accepted: accepted,
            reviewer_acceptance.c.text: text,
            reviewer_acceptance.c.format: format_,
            reviewer_acceptance.c.date: datetime.utcnow(),
        }
        with self._transaction(_conn=_conn) as conn:
            result = conn.execute(reviewer_acceptance.update().where(
                reviewer_acceptance.c.id == reviewer_acceptance_id
            ).values(values))
            if result.rowcount != 1:
                raise ConsistencyError(
                    'no rows matched updating reviewer acceptance {}',
                    reviewer_acceptance_id)
    def update_review_figure(
            self, reviewer_id, link_id, fig_id=None,
            figure=None, type_=None, filename=None, uploader_person_id=None,
            state=None, state_prev=None, caption=None):
        """
        Update the record of a figure attached to a review.
        Can be used to update the figure or the state.
        If the figure is updated, then the type, filename and uploader
        must be specified and the state will be set to NEW -- the state
        must not be specified explicitly.
        :return: the internal figure ID (not link ID) if it changed,
            for test purposes only
        """
        # When a reviewer ID is given, restrict the update to figure links
        # belonging to that reviewer.
        where_extra = []
        if reviewer_id is not None:
            where_extra.append(review_fig_link.c.reviewer_id == reviewer_id)
        return self._update_figure(
            review_fig, review_fig_link,
            review_fig_preview, review_fig_thumbnail,
            link_id, fig_id, figure, type_, filename, uploader_person_id,
            state, state_prev, caption,
            where_extra=where_extra,
        )
    def _exists_reviewer(self, conn, proposal_id, role, person_id=None):
        """
        Test whether a reviewer record of the given role already exists
        for a proposal.

        If `person_id` is given, only records for that person are counted.
        """
        stmt = select([count(reviewer.c.id)]).where(and_(
            reviewer.c.proposal_id == proposal_id,
            reviewer.c.role == role
        ))
        if person_id is not None:
            stmt = stmt.where(reviewer.c.person_id == person_id)
        return 0 < conn.execute(stmt).scalar()
    def _exists_review(self, conn, reviewer_id):
        """
        Test whether a review record by the given reviewer exists.
        """
        # The review table is keyed on reviewer_id, so the count is 0 or 1.
        return 0 < conn.execute(select([count(review.c.reviewer_id)]).where(
            review.c.reviewer_id == reviewer_id
        )).scalar()
|
vismartltd/edx-platform | refs/heads/master | common/test/acceptance/pages/studio/library.py | 28 | """
Library edit page in Studio
"""
from bok_choy.javascript import js_defined, wait_for_js
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise
from selenium.webdriver.support.select import Select
from .component_editor import ComponentEditorView
from .container import XBlockWrapper
from ...pages.studio.users import UsersPageMixin
from ...pages.studio.pagination import PaginatedMixin
from .utils import confirm_prompt, wait_for_notification
from . import BASE_URL
class LibraryPage(PageObject):
    """
    Base page for Library pages. Defaults URL to the edit page.
    """
    def __init__(self, browser, locator):
        super(LibraryPage, self).__init__(browser)
        # Opaque library usage key identifying the library under test.
        self.locator = locator
    @property
    def url(self):
        """
        URL to the library edit page for the given library.
        """
        # NOTE(review): `unicode` implies this module targets Python 2.
        return "{}/library/{}".format(BASE_URL, unicode(self.locator))
    def is_browser_on_page(self):
        """
        Returns True iff the browser has loaded the library edit page.
        """
        return self.q(css='body.view-library').present
class LibraryEditPage(LibraryPage, PaginatedMixin, UsersPageMixin):
    """
    Library edit page in Studio
    """
    def get_header_title(self):
        """
        The text of the main heading (H1) visible on the page.
        """
        return self.q(css='h1.page-header-title').text
    def wait_until_ready(self):
        """
        When the page first loads, there is a loading indicator and most
        functionality is not yet available. This waits for that loading to
        finish.
        Always call this before using the page. It also disables animations
        for improved test reliability.
        """
        self.wait_for_ajax()
        super(LibraryEditPage, self).wait_until_ready()
    @property
    def xblocks(self):
        """
        Return a list of xblocks loaded on the container page.
        """
        return self._get_xblocks()
    def are_previews_showing(self):
        """
        Determines whether or not previews are showing for XBlocks
        """
        return all([not xblock.is_placeholder() for xblock in self.xblocks])
    def toggle_previews(self):
        """
        Clicks the preview toggling button and waits for the previews to appear or disappear.
        """
        toggle = not self.are_previews_showing()
        self.q(css='.toggle-preview-button').click()
        # Block until the preview visibility actually flips state.
        EmptyPromise(
            lambda: self.are_previews_showing() == toggle,
            'Preview is visible: %s' % toggle,
            timeout=30
        ).fulfill()
        self.wait_until_ready()
    def click_duplicate_button(self, xblock_id):
        """
        Click on the duplicate button for the given XBlock
        """
        self._action_btn_for_xblock_id(xblock_id, "duplicate").click()
        wait_for_notification(self)
        self.wait_for_ajax()
    def click_delete_button(self, xblock_id, confirm=True):
        """
        Click on the delete button for the given XBlock
        """
        self._action_btn_for_xblock_id(xblock_id, "delete").click()
        if confirm:
            confirm_prompt(self) # this will also wait_for_notification()
        self.wait_for_ajax()
    def _get_xblocks(self):
        """
        Create an XBlockWrapper for each XBlock div found on the page.
        """
        prefix = '.wrapper-xblock.level-page '
        return self.q(css=prefix + XBlockWrapper.BODY_SELECTOR).map(
            lambda el: XBlockWrapper(self.browser, el.get_attribute('data-locator'))
        ).results
    def _div_for_xblock_id(self, xblock_id):
        """
        Given an XBlock's usage locator as a string, return the WebElement for
        that block's wrapper div.
        """
        return self.q(css='.wrapper-xblock.level-page .studio-xblock-wrapper').filter(
            lambda el: el.get_attribute('data-locator') == xblock_id
        )
    def _action_btn_for_xblock_id(self, xblock_id, action):
        """
        Given an XBlock's usage locator as a string, return one of its action
        buttons.
        action is 'edit', 'duplicate', or 'delete'
        """
        return self._div_for_xblock_id(xblock_id)[0].find_element_by_css_selector(
            '.header-actions .{action}-button.action-button'.format(action=action)
        )
class StudioLibraryContentEditor(ComponentEditorView):
    """
    Library Content XBlock Modal edit window
    """
    # Labels used to identify the fields on the edit modal:
    LIBRARY_LABEL = "Library"
    COUNT_LABEL = "Count"
    SCORED_LABEL = "Scored"
    PROBLEM_TYPE_LABEL = "Problem Type"
    @property
    def library_name(self):
        """ Gets name of library """
        return self.get_selected_option_text(self.LIBRARY_LABEL)
    @library_name.setter
    def library_name(self, library_name):
        """
        Select a library from the library select box
        """
        self.set_select_value(self.LIBRARY_LABEL, library_name)
        # Wait until the modal reflects the new selection before returning.
        EmptyPromise(lambda: self.library_name == library_name, "library_name is updated in modal.").fulfill()
    @property
    def count(self):
        """
        Gets value of children count input
        """
        return int(self.get_setting_element(self.COUNT_LABEL).get_attribute('value'))
    @count.setter
    def count(self, count):
        """
        Sets value of children count input
        """
        count_text = self.get_setting_element(self.COUNT_LABEL)
        count_text.clear()
        count_text.send_keys(count)
        EmptyPromise(lambda: self.count == count, "count is updated in modal.").fulfill()
    @property
    def scored(self):
        """
        Gets value of scored select
        """
        value = self.get_selected_option_text(self.SCORED_LABEL)
        if value == 'True':
            return True
        elif value == 'False':
            return False
        # The select is expected to contain only 'True'/'False'.
        raise ValueError("Unknown value {value} set for {label}".format(value=value, label=self.SCORED_LABEL))
    @scored.setter
    def scored(self, scored):
        """
        Sets value of scored select
        """
        self.set_select_value(self.SCORED_LABEL, str(scored))
        EmptyPromise(lambda: self.scored == scored, "scored is updated in modal.").fulfill()
    @property
    def capa_type(self):
        """
        Gets value of CAPA type select
        """
        return self.get_setting_element(self.PROBLEM_TYPE_LABEL).get_attribute('value')
    @capa_type.setter
    def capa_type(self, value):
        """
        Sets value of CAPA type select
        """
        self.set_select_value(self.PROBLEM_TYPE_LABEL, value)
        EmptyPromise(lambda: self.capa_type == value, "problem type is updated in modal.").fulfill()
    def set_select_value(self, label, value):
        """
        Sets the select with given label (display name) to the specified value
        """
        elem = self.get_setting_element(label)
        select = Select(elem)
        select.select_by_value(value)
@js_defined('window.LibraryContentAuthorView')
class StudioLibraryContainerXBlockWrapper(XBlockWrapper):
    """
    Wraps :class:`.container.XBlockWrapper` for use with LibraryContent blocks
    """
    # This wrapper operates on an already-loaded page, so no URL is defined.
    url = None
    def is_browser_on_page(self):
        """
        Returns true iff the library content area has been loaded
        """
        return self.q(css='article.content-primary').visible
    def is_finished_loading(self):
        """
        Returns true iff the Loading indicator is not visible
        """
        return not self.q(css='div.ui-loading').visible
    @classmethod
    def from_xblock_wrapper(cls, xblock_wrapper):
        """
        Factory method: creates :class:`.StudioLibraryContainerXBlockWrapper` from :class:`.container.XBlockWrapper`
        """
        return cls(xblock_wrapper.browser, xblock_wrapper.locator)
    def get_body_paragraphs(self):
        """
        Gets library content body paragraphs
        """
        return self.q(css=self._bounded_selector(".xblock-message-area p"))
    @wait_for_js # Wait for the fragment.initialize_js('LibraryContentAuthorView') call to finish
    def refresh_children(self):
        """
        Click "Update now..." button
        """
        btn_selector = self._bounded_selector(".library-update-btn")
        self.wait_for_element_presence(btn_selector, 'Update now button is present.')
        self.q(css=btn_selector).first.click()
        # This causes a reload (see cms/static/xmodule_js/public/js/library_content_edit.js)
        self.wait_for(lambda: self.is_browser_on_page(), 'StudioLibraryContainerXBlockWrapper has reloaded.')
        # Wait longer than the default 60 seconds, because this was intermittently failing on jenkins
        # with the screenshot showing that the Loading indicator was still visible. See TE-745.
        self.wait_for(lambda: self.is_finished_loading(), 'Loading indicator is not visible.', timeout=120)
        # And wait to make sure the ajax post has finished.
        self.wait_for_ajax()
        self.wait_for_element_absence(btn_selector, 'Wait for the XBlock to finish reloading')
|
bovee/Aston | refs/heads/master | aston/tracefile/mime.py | 1 | import binascii
from glob import glob
import inspect
from importlib import import_module
import mimetypes
import os
import os.path as op
from aston.cache import cache
# File types from http://en.wikipedia.org/wiki/Mass_spectrometry_data_format
# and http://www.amdis.net/What_is_AMDIS/AMDIS_Detailed/amdis_detailed.html
# TODO: .FID | Bruker instrument data format
# TODO: .LRP | Shrader/GCMate
# TODO: .MS | Varian Saturn Files
# TODO: .MS | HP Benchtop and MS Engines
# TODO: .MS | Finnigan (GCQ,INCOS and ITDS formats) also *.MI & *.DAT
# TODO: .MSF | Bruker
# TODO: .PKL | MassLynx associated format
# TODO: .RAW | Micromass MassLynx directory format
# TODO: .RAW | PerkinElmer TurboMass file format
# TODO: .SMS | Saturn SMS
# TODO: .WIFF| ABI/Sciex (QSTAR and QTRAP instrument) format
# TODO: .YEP | Bruker instrument data format
# Table of "mimetype extension(s) magic-prefix" triples consumed by
# get_mimetype below; '*' means no extension / no magic number is defined.
# (A duplicate chemstation-flowinject row has been removed.)
mimes = """
image/png png 8950
application/fasta fa,fasta,fna,faa 3E
text/csv csv *
application/mzxml mzxml *
application/mzml mzml *
application/netcdf cdf 4344
application/fcs fcs,lmd 46435333
application/vnd-sequencing-ab1 ab1,abi 4142
application/vnd-sequencing-scf scf 2E73
application/vnd-agilent-chemstation-pump lpmp1.reg 0233
application/vnd-agilent-chemstation-fraction lafc1fd.reg 0233
application/vnd-agilent-chemstation-flowinject acqres.reg 0233
application/vnd-agilent-chemstation-lcstat lcdiag.reg 0233
application/vnd-agilent-masshunter-pump cappump.cd *
application/vnd-agilent-masshunter-temp tcc1.cd *
application/vnd-agilent-masshunter-acqmethod acq_method.xml *
application/vnd-agilent-masshunter-sampleinfo sample_info.xml *
application/vnd-agilent-masshunter-msmsscan bin 0101
application/vnd-agilent-masshunter-dad sd *
application/vnd-agilent-chemstation-fid ch 0238
application/vnd-agilent-chemstation-fid2 ch 0331
application/vnd-agilent-chemstation-ms ms 0132
application/vnd-agilent-chemstation-mwd ch 0233
application/vnd-agilent-chemstation-mwd2 ch 0331
application/vnd-agilent-chemstation-dad uv 0233
application/vnd-agilent-chemstation-dad2 uv 0331
application/vnd-bruker-msms ami *
application/vnd-bruker-baf baf 2400
application/vnd-inficon-hapsite hps 0403
application/vnd-sciex-wiff wiff D0CF
application/vnd-thermo-cf cf FFFF
application/vnd-thermo-dxf dxf FFFF
application/vnd-thermo-raw raw 01A1
application/vnd-waters-autospec idx *
"""
def get_mimetype(filename, magic_all):
    """Guess the mimetype of a data file.

    Lookup order: the leading magic bytes (longest prefix first), then the
    filename extension, then the stdlib `mimetypes` database.

    :param filename: file name or path (may be None if unknown).
    :param magic_all: leading bytes of the file (at least 4 are used).
    :return: mimetype string, or None if nothing matched.
    """
    # Build the lookup tables from the module-level `mimes` triples.
    ft_magic = {}
    ft_ext = {}
    for line in mimes.strip('\n').split('\n'):
        mime, ext, magic = line.split()
        if magic != '*':
            for m in magic.split(','):
                ft_magic[m] = mime
        if ext != '*':
            for e in ext.split(','):
                ft_ext[e] = mime
    # TODO: maybe do some kind of ranking?
    # need to allow multiple filetypes for common magic/extensions
    for i in [4, 2, 1]:
        magic = binascii.b2a_hex(magic_all[:i]).decode('ascii').upper()
        if magic in ft_magic:
            return ft_magic[magic]
    if filename is None:
        # Bug fix: previously fell through to mimetypes.guess_type(None),
        # which raises TypeError; with no name there is nothing to guess.
        return None
    ext = os.path.splitext(filename)[1].lower()[1:]
    if ext in ft_ext:
        return ft_ext[ext]
    return mimetypes.guess_type(filename)[0]
@cache(maxsize=1)
def tfclasses():
    """
    A mapping of mimetypes to every class for reading data files.
    """
    # automatically find any subclasses of TraceFile in the same
    # directory as me
    classes = {}
    mydir = op.dirname(op.abspath(inspect.getfile(get_mimetype)))
    # NOTE(review): base classes are matched by the *string* form of the
    # class object, which breaks silently if the modules are renamed.
    tfcls = {"<class 'aston.tracefile.TraceFile'>",
             "<class 'aston.tracefile.ScanListFile'>"}
    for filename in glob(op.join(mydir, '*.py')):
        name = op.splitext(op.basename(filename))[0]
        # Importing the module triggers registration of its classes.
        module = import_module('aston.tracefile.' + name)
        for clsname in dir(module):
            cls = getattr(module, clsname)
            if hasattr(cls, '__base__'):
                if str(cls.__base__) in tfcls:
                    # Only *direct* subclasses are registered, keyed by
                    # their declared mime type.
                    classes[cls.mime] = cls
    return classes
|
Panaetius/woipv | refs/heads/master | src/models/test_leak.py | 1 | import argparse
import psutil
from os import getpid
import tensorflow as tf
import numpy as np
def create_model(input_size, output_size):
# model placeholders:
shape = tf.clip_by_value(tf.cast(tf.random_normal([2]) * 38.0 + 64.0, tf.int32), 38, 120)
shape = tf.concat([[1], shape, [512]], axis=0)
return tf.cast(tf.ones(shape, dtype=tf.int64), tf.int32)
def parse_args(args=None):
    """Build the script's command-line interface and parse `args`.

    Passing None (the default) parses sys.argv as usual.
    """
    parser = argparse.ArgumentParser()
    int_options = (
        ('--max_epochs', 10000),
        ('--batch_size', 7000),
        ('--input_size', 100),
        ('--output_size', 100),
    )
    for flag, default in int_options:
        parser.add_argument(flag, type=int, default=default)
    parser.add_argument('--device', type=str, default="gpu:0")
    return parser.parse_args(args=args)
def main():
    """Run the leak-reproduction op repeatedly, printing process memory usage."""
    args = parse_args()
    session_conf = tf.ConfigProto(allow_soft_placement=True)
    np.random.seed(1234)
    # Track this process's resident memory via psutil.
    process = psutil.Process(getpid())
    with tf.Session(config=session_conf) as session, tf.device(args.device):
        op = create_model(args.input_size, args.output_size)
        session.run(tf.global_variables_initializer())
        before = process.memory_percent()
        for epoch in range(args.max_epochs):
            session.run(op)
            if epoch % 100 == 0:
                # Report memory growth every 100 iterations.
                after = process.memory_percent()
                print("MEMORY CHANGE %.4f -> %.4f" % (before, after))
                before = after
if __name__ == "__main__":
main() |
nilsonmorales/Puppyes-nightrc | refs/heads/master | usr/local/lib/python2.7/dist-packages/youtube_dl/extractor/__init__.py | 1 | from .appletrailers import AppleTrailersIE
from .addanime import AddAnimeIE
from .anitube import AnitubeIE
from .archiveorg import ArchiveOrgIE
from .ard import ARDIE
from .arte import (
ArteTvIE,
ArteTVPlus7IE,
ArteTVCreativeIE,
ArteTVFutureIE,
)
from .auengine import AUEngineIE
from .bambuser import BambuserIE, BambuserChannelIE
from .bandcamp import BandcampIE, BandcampAlbumIE
from .bliptv import BlipTVIE, BlipTVUserIE
from .bloomberg import BloombergIE
from .breakcom import BreakIE
from .brightcove import BrightcoveIE
from .c56 import C56IE
from .canalplus import CanalplusIE
from .canalc2 import Canalc2IE
from .cinemassacre import CinemassacreIE
from .clipfish import ClipfishIE
from .clipsyndicate import ClipsyndicateIE
from .cnn import CNNIE
from .collegehumor import CollegeHumorIE
from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
from .condenast import CondeNastIE
from .criterion import CriterionIE
from .cspan import CSpanIE
from .d8 import D8IE
from .dailymotion import (
DailymotionIE,
DailymotionPlaylistIE,
DailymotionUserIE,
)
from .daum import DaumIE
from .depositfiles import DepositFilesIE
from .dotsub import DotsubIE
from .dreisat import DreiSatIE
from .defense import DefenseGouvFrIE
from .ebaumsworld import EbaumsWorldIE
from .ehow import EHowIE
from .eighttracks import EightTracksIE
from .eitb import EitbIE
from .escapist import EscapistIE
from .exfm import ExfmIE
from .extremetube import ExtremeTubeIE
from .facebook import FacebookIE
from .faz import FazIE
from .fktv import (
FKTVIE,
FKTVPosteckeIE,
)
from .flickr import FlickrIE
from .francetv import (
PluzzIE,
FranceTvInfoIE,
France2IE,
GenerationQuoiIE
)
from .freesound import FreesoundIE
from .funnyordie import FunnyOrDieIE
from .gamekings import GamekingsIE
from .gamespot import GameSpotIE
from .gametrailers import GametrailersIE
from .generic import GenericIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
from .hark import HarkIE
from .hotnewhiphop import HotNewHipHopIE
from .howcast import HowcastIE
from .hypem import HypemIE
from .ign import IGNIE, OneUPIE
from .imdb import ImdbIE
from .ina import InaIE
from .infoq import InfoQIE
from .instagram import InstagramIE
from .internetvideoarchive import InternetVideoArchiveIE
from .jeuxvideo import JeuxVideoIE
from .jukebox import JukeboxIE
from .justintv import JustinTVIE
from .kankan import KankanIE
from .keezmovies import KeezMoviesIE
from .kickstarter import KickStarterIE
from .keek import KeekIE
from .liveleak import LiveLeakIE
from .livestream import LivestreamIE, LivestreamOriginalIE
from .metacafe import MetacafeIE
from .metacritic import MetacriticIE
from .mit import TechTVMITIE, MITIE
from .mixcloud import MixcloudIE
from .mofosex import MofosexIE
from .mtv import MTVIE
from .muzu import MuzuTVIE
from .myspace import MySpaceIE
from .myspass import MySpassIE
from .myvideo import MyVideoIE
from .naver import NaverIE
from .nba import NBAIE
from .nbc import NBCNewsIE
from .newgrounds import NewgroundsIE
from .nhl import NHLIE, NHLVideocenterIE
from .niconico import NiconicoIE
from .nowvideo import NowVideoIE
from .ooyala import OoyalaIE
from .orf import ORFIE
from .pbs import PBSIE
from .photobucket import PhotobucketIE
from .podomatic import PodomaticIE
from .pornhub import PornHubIE
from .pornotube import PornotubeIE
from .rbmaradio import RBMARadioIE
from .redtube import RedTubeIE
from .ringtv import RingTVIE
from .ro220 import Ro220IE
from .rottentomatoes import RottenTomatoesIE
from .roxwel import RoxwelIE
from .rtlnow import RTLnowIE
from .rutube import RutubeIE
from .sina import SinaIE
from .slashdot import SlashdotIE
from .slideshare import SlideshareIE
from .sohu import SohuIE
from .soundcloud import SoundcloudIE, SoundcloudSetIE, SoundcloudUserIE
from .southparkstudios import (
SouthParkStudiosIE,
SouthparkDeIE,
)
from .space import SpaceIE
from .spankwire import SpankwireIE
from .spiegel import SpiegelIE
from .stanfordoc import StanfordOpenClassroomIE
from .statigram import StatigramIE
from .steam import SteamIE
from .streamcloud import StreamcloudIE
from .sztvhu import SztvHuIE
from .teamcoco import TeamcocoIE
from .techtalks import TechTalksIE
from .ted import TEDIE
from .tf1 import TF1IE
from .thisav import ThisAVIE
from .toutv import TouTvIE
from .traileraddict import TrailerAddictIE
from .trilulilu import TriluliluIE
from .tube8 import Tube8IE
from .tudou import TudouIE
from .tumblr import TumblrIE
from .tutv import TutvIE
from .tvp import TvpIE
from .unistra import UnistraIE
from .ustream import UstreamIE, UstreamChannelIE
from .vbox7 import Vbox7IE
from .veehd import VeeHDIE
from .veoh import VeohIE
from .vevo import VevoIE
from .vice import ViceIE
from .viddler import ViddlerIE
from .videodetective import VideoDetectiveIE
from .videofyme import VideofyMeIE
from .videopremium import VideoPremiumIE
from .vimeo import (
VimeoIE,
VimeoChannelIE,
VimeoUserIE,
)
from .vine import VineIE
from .viki import VikiIE
from .vk import VKIE
from .wat import WatIE
from .websurg import WeBSurgIE
from .weibo import WeiboIE
from .wimp import WimpIE
from .worldstarhiphop import WorldStarHipHopIE
from .xhamster import XHamsterIE
from .xnxx import XNXXIE
from .xvideos import XVideosIE
from .xtube import XTubeIE
from .yahoo import (
YahooIE,
YahooNewsIE,
YahooSearchIE,
)
from .youjizz import YouJizzIE
from .youku import YoukuIE
from .youporn import YouPornIE
from .youtube import (
YoutubeIE,
YoutubePlaylistIE,
YoutubeSearchIE,
YoutubeSearchDateIE,
YoutubeUserIE,
YoutubeChannelIE,
YoutubeShowIE,
YoutubeSubscriptionsIE,
YoutubeRecommendedIE,
YoutubeTruncatedURLIE,
YoutubeWatchLaterIE,
YoutubeFavouritesIE,
YoutubeHistoryIE,
)
from .zdf import ZDFIE
# Collect every extractor class imported above (names ending in 'IE'),
# keeping the catch-all GenericIE last so it is only tried as a fallback.
_ALL_CLASSES = [
    klass
    for name, klass in globals().items()
    if name.endswith('IE') and name != 'GenericIE'
]
_ALL_CLASSES.append(GenericIE)
def gen_extractors():
    """ Return a list of an instance of every supported extractor.
    The order does matter; the first extractor matched is the one handling the URL.
    """
    # Fresh instances each call; _ALL_CLASSES already ends with GenericIE.
    return [klass() for klass in _ALL_CLASSES]
def get_info_extractor(ie_name):
    """Returns the info extractor class with the given ie_name"""
    # Extractor classes follow the '<Name>IE' naming convention; raises
    # KeyError if no such extractor was imported into this module.
    return globals()[ie_name+'IE']
|
atosatto/ansible | refs/heads/devel | lib/ansible/module_utils/network_common.py | 60 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from ansible.module_utils.six import iteritems
from ansible.module_utils.basic import AnsibleFallbackNotFound
def to_list(val):
    """Coerce `val` to a list.

    Lists, tuples and sets are copied into a new list; None becomes an
    empty list; any other value is wrapped in a single-element list.
    """
    if val is None:
        return []
    if isinstance(val, (list, tuple, set)):
        return list(val)
    return [val]
class ComplexDict(object):
    """Transforms a dict with an argument spec
    This class will take a dict and apply an Ansible argument spec to the
    values. The resulting dict will contain all of the keys in the param
    with appropriate values set.
    Example::
        argument_spec = dict(
            command=dict(key=True),
            display=dict(default='text', choices=['text', 'json']),
            validate=dict(type='bool')
        )
        transform = ComplexDict(argument_spec, module)
        value = dict(command='foo')
        result = transform(value)
        print result
        {'command': 'foo', 'display': 'text', 'validate': None}
    Supported argument spec:
        * key - specifies how to map a single value to a dict
        * read_from - read and apply the argument_spec from the module
        * required - a value is required
        * type - type of value (uses AnsibleModule type checker)
        * fallback - implements fallback function
        * choices - set of valid options
        * default - default value
    """
    def __init__(self, attrs, module):
        self._attributes = attrs
        self._module = module
        self.attr_names = frozenset(self._attributes.keys())
        self._has_key = False
        for name, attr in iteritems(self._attributes):
            if attr.get('read_from'):
                # Merge in (without overriding) the referenced
                # argument_spec entry from the module.
                spec = self._module.argument_spec.get(attr['read_from'])
                if not spec:
                    raise ValueError('argument_spec %s does not exist' % attr['read_from'])
                for key, value in iteritems(spec):
                    if key not in attr:
                        attr[key] = value
            if attr.get('key'):
                if self._has_key:
                    raise ValueError('only one key value can be specified')
                # Bug fix: this previously assigned to a dead local named
                # 'self_has_key', so duplicate 'key' attributes were never
                # detected.
                self._has_key = True
                attr['required'] = True
    def _dict(self, value):
        # Expand a bare (non-dict) value into a full dict: the value goes
        # to the 'key' attribute, every other attribute gets its default.
        obj = {}
        for name, attr in iteritems(self._attributes):
            if attr.get('key'):
                obj[name] = value
            else:
                obj[name] = attr.get('default')
        return obj
    def __call__(self, value):
        if not isinstance(value, dict):
            value = self._dict(value)
        unknown = set(value).difference(self.attr_names)
        if unknown:
            raise ValueError('invalid keys: %s' % ','.join(unknown))
        for name, attr in iteritems(self._attributes):
            if not value.get(name):
                # NOTE(review): falsy provided values (0, False, '') are
                # replaced by the default here -- presumably intentional.
                value[name] = attr.get('default')
            if attr.get('fallback') and not value.get(name):
                fallback = attr.get('fallback', (None,))
                fallback_strategy = fallback[0]
                fallback_args = []
                fallback_kwargs = {}
                if fallback_strategy is not None:
                    for item in fallback[1:]:
                        if isinstance(item, dict):
                            fallback_kwargs = item
                        else:
                            fallback_args = item
                    try:
                        value[name] = fallback_strategy(*fallback_args, **fallback_kwargs)
                    except AnsibleFallbackNotFound:
                        continue
            if attr.get('required') and value.get(name) is None:
                raise ValueError('missing required attribute %s' % name)
            if 'choices' in attr:
                if value[name] not in attr['choices']:
                    raise ValueError('%s must be one of %s, got %s' % \
                        (name, ', '.join(attr['choices']), value[name]))
            if value[name] is not None:
                # Delegate type validation to the AnsibleModule checker
                # registered for the declared type (default 'str').
                value_type = attr.get('type', 'str')
                type_checker = self._module._CHECK_ARGUMENT_TYPES_DISPATCHER[value_type]
                type_checker(value[name])
        return value
class ComplexList(ComplexDict):
    """Extends ```ComplexDict``` to handle a list of dicts """
    def __call__(self, values):
        # Apply the single-item transform to each element in order.
        if not isinstance(values, (list, tuple)):
            raise TypeError('value must be an ordered iterable')
        return [(super(ComplexList, self).__call__(v)) for v in values]
|
jun-wan/scilifelab | refs/heads/master | tests/data.py | 4 |
data_files = {}
# Minimal Illumina RunInfo.xml fixture: a paired-end run (2 x 101 bp) with a
# 7-cycle index read, used by tests that parse run information.
data_files['RunInfo.xml'] = """<?xml version="1.0"?>
<RunInfo xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" Version="2">
<Run Id="120924_SN0002_0003_CC003CCCXX" Number="1">
<Flowcell>CC003CCCXX</Flowcell>
<Instrument>SN0002</Instrument>
<Date>120924</Date>
<Reads>
<Read Number="1" NumCycles="101" IsIndexedRead="N" />
<Read Number="2" NumCycles="7" IsIndexedRead="Y" />
<Read Number="3" NumCycles="101" IsIndexedRead="N" />
</Reads>
<FlowcellLayout LaneCount="8" SurfaceCount="2" SwathCount="3" TileCount="16" />
<AlignToPhiX>
<Lane>1</Lane>
<Lane>2</Lane>
<Lane>3</Lane>
<Lane>4</Lane>
<Lane>5</Lane>
<Lane>6</Lane>
<Lane>7</Lane>
<Lane>8</Lane>
</AlignToPhiX>
</Run>
</RunInfo>
"""
|
disruptek/boto | refs/heads/develop | boto/vpc/internetgateway.py | 170 | # Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an Internet Gateway
"""
from boto.ec2.ec2object import TaggedEC2Object
from boto.resultset import ResultSet
class InternetGateway(TaggedEC2Object):
    """Represents an EC2 VPC Internet gateway, parsed from API XML."""
    def __init__(self, connection=None):
        super(InternetGateway, self).__init__(connection)
        # Gateway identifier (e.g. 'igw-...'), filled in during parsing.
        self.id = None
        # List of InternetGatewayAttachment objects.
        self.attachments = []
    def __repr__(self):
        return 'InternetGateway:%s' % self.id
    def startElement(self, name, attrs, connection):
        result = super(InternetGateway, self).startElement(name, attrs, connection)
        if result is not None:
            # Parent found an interested element, just return it
            return result
        if name == 'attachmentSet':
            self.attachments = ResultSet([('item', InternetGatewayAttachment)])
            return self.attachments
        else:
            return None
    def endElement(self, name, value, connection):
        if name == 'internetGatewayId':
            self.id = value
        else:
            # Unrecognised elements become plain attributes on the object.
            setattr(self, name, value)
class InternetGatewayAttachment(object):
    """Represents the attachment of an Internet gateway to a VPC.

    Parsed from the ``attachmentSet`` items of an EC2 API response.
    """
    # XML element name -> attribute name on this object.
    _FIELDS = {'vpcId': 'vpc_id', 'state': 'state'}
    def __init__(self, connection=None):
        self.vpc_id = None
        self.state = None
    def __repr__(self):
        return 'InternetGatewayAttachment:%s' % self.vpc_id
    def startElement(self, name, attrs, connection):
        # No nested elements are expected inside an attachment item.
        return None
    def endElement(self, name, value, connection):
        attr = self._FIELDS.get(name)
        if attr is not None:
            setattr(self, attr, value)
|
rockmans/KyleAndEmily | refs/heads/master | kyleandemily/rsvp/management/commands/total_guests.py | 1 | from django.core.management.base import BaseCommand, CommandError
from kyleandemily.rsvp.models import Invitation
class Command(BaseCommand):
    """Management command printing aggregate guest counts per event."""

    help = 'Prints the guest totals'

    def handle(self, *args, **options):
        # Materialize the queryset once, then tally each event separately.
        invitations = list(Invitation.objects.all())
        welcome = sum(inv.welcome_guests for inv in invitations)
        wedding = sum(inv.wedding_guests for inv in invitations)
        farewell = sum(inv.farewell_guests for inv in invitations)

        self.stdout.write('Total Welcome Dinner Guests: {0}'.format(welcome))
        self.stdout.write('Total Wedding Guests: {0}'.format(wedding))
        self.stdout.write('Total Farewell Brunch Guests: {0}'.format(farewell))
pchaigno/grr | refs/heads/master | lib/flows/general/memory.py | 1 | #!/usr/bin/env python
"""Flows for controlling access to memory.
These flows allow for distributing memory access modules to clients and
performing basic analysis.
"""
import json
import logging
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import flow
from grr.lib import rekall_profile_server
from grr.lib import utils
# For RekallResponseCollection pylint: disable=unused-import
from grr.lib.aff4_objects import aff4_rekall
# pylint: enable=unused-import
from grr.lib.flows.general import file_finder
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import flows as rdf_flows
from grr.lib.rdfvalues import paths as rdf_paths
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import flows_pb2
class MemoryCollectorCondition(rdf_structs.RDFProtoStruct):
  """RDF wrapper for the MemoryCollectorCondition protobuf."""
  protobuf = flows_pb2.MemoryCollectorCondition
class MemoryCollectorWithoutLocalCopyDumpOption(rdf_structs.RDFProtoStruct):
  """RDF wrapper for the dump option that streams memory directly."""
  protobuf = flows_pb2.MemoryCollectorWithoutLocalCopyDumpOption
class MemoryCollectorWithLocalCopyDumpOption(rdf_structs.RDFProtoStruct):
  """RDF wrapper for the dump option that snapshots memory to disk first."""
  protobuf = flows_pb2.MemoryCollectorWithLocalCopyDumpOption
class MemoryCollectorDumpOption(rdf_structs.RDFProtoStruct):
  """RDF wrapper for the MemoryCollectorDumpOption protobuf (union of the two dump modes)."""
  protobuf = flows_pb2.MemoryCollectorDumpOption
class MemoryCollectorDownloadAction(rdf_structs.RDFProtoStruct):
  """RDF wrapper for the "download memory to the server" action."""
  protobuf = flows_pb2.MemoryCollectorDownloadAction
class MemoryCollectorSendToSocketAction(rdf_structs.RDFProtoStruct):
  """RDF wrapper for the "stream encrypted memory to a socket" action."""
  protobuf = flows_pb2.MemoryCollectorSendToSocketAction
class MemoryCollectorAction(rdf_structs.RDFProtoStruct):
  """RDF wrapper for the MemoryCollectorAction protobuf (download vs. send-to-socket)."""
  protobuf = flows_pb2.MemoryCollectorAction
class MemoryCollectorArgs(rdf_structs.RDFProtoStruct):
  """Arguments for the MemoryCollector flow."""
  protobuf = flows_pb2.MemoryCollectorArgs
class MemoryCollector(flow.GRRFlow):
  """Flow for scanning and imaging memory.

  MemoryCollector applies "action" (e.g. Download) to memory if memory contents
  match all given "conditions". Matches are then written to the results
  collection. If there are no "conditions", "action" is applied immediately.

  MemoryCollector replaces deprecated DownloadMemoryImage and
  ImageMemoryToSocket.

  When downloading memory:
  If the file transfer fails and you specified local copy, you can attempt to
  download again using the FileFinder flow without needing to copy all of memory
  to disk again. FileFinder will only retrieve parts of the image that weren't
  already downloaded. Note that if the flow fails, you'll need to run
  Administrative/DeleteGRRTempFiles to clean up the disk.

  When imaging memory to socket:
  Choose a key and an IV in hex format (if run from the GUI,
  there will be a pregenerated pair key and iv for you to use) and run a
  listener on the server you want to use like this:

  nc -l <port> | openssl aes-128-cbc -d -K <key> -iv <iv> > <filename>
  """
  friendly_name = "Memory Collector"
  category = "/Memory/"
  behaviours = flow.GRRFlow.behaviours + "BASIC"
  args_type = MemoryCollectorArgs

  def ConditionsToFileFinderConditions(self, conditions):
    """Translate MemoryCollector conditions into FileFinder conditions.

    Args:
      conditions: iterable of MemoryCollectorCondition.

    Returns:
      A list of file_finder.FileFinderCondition objects.

    Raises:
      ValueError: on an unrecognized condition type.
    """
    ff_condition_type_cls = file_finder.FileFinderCondition.Type
    result = []
    for c in conditions:
      if c.condition_type == MemoryCollectorCondition.Type.LITERAL_MATCH:
        result.append(file_finder.FileFinderCondition(
            condition_type=ff_condition_type_cls.CONTENTS_LITERAL_MATCH,
            contents_literal_match=c.literal_match))
      elif c.condition_type == MemoryCollectorCondition.Type.REGEX_MATCH:
        result.append(file_finder.FileFinderCondition(
            condition_type=ff_condition_type_cls.CONTENTS_REGEX_MATCH,
            contents_regex_match=c.regex_match))
      else:
        # Interpolate the condition type into the message; previously the
        # format string and the value were passed as two separate arguments
        # so the message was never %-formatted.
        raise ValueError("Unknown condition type: %s" % c.condition_type)

    return result

  @flow.StateHandler(next_state="StoreMemoryInformation")
  def Start(self):
    """Load the memory driver, then continue in StoreMemoryInformation."""
    self.state.Register("memory_information", None)
    # NOTE(review): for a SEND_TO_SOCKET action this reads the protobuf
    # default (presumably an empty string) — confirm against the proto.
    self.state.Register(
        "destdir",
        self.args.action.download.dump_option.with_local_copy.destdir)
    self.CallFlow("LoadMemoryDriver",
                  driver_installer=self.args.driver_installer,
                  next_state="StoreMemoryInformation")

  def _DiskFreeCheckRequired(self):
    """True iff a local-copy download was requested with the free-space check on."""
    ac = self.args.action
    dump_option = ac.download.dump_option
    return all([
        ac.action_type == "DOWNLOAD",
        dump_option.option_type == "WITH_LOCAL_COPY",
        dump_option.with_local_copy.check_disk_free_space])

  @flow.StateHandler(next_state=["Filter", "StoreTmpDir", "CheckDiskFree"])
  def StoreMemoryInformation(self, responses):
    """Record the driver's memory layout and route to the next check."""
    if not responses.success:
      raise flow.FlowError("Failed due to no memory driver:%s." %
                           responses.status)
    self.state.memory_information = responses.First()
    if self._DiskFreeCheckRequired():
      if self.state.destdir:
        self.CallStateInline(next_state="CheckDiskFree")
      else:
        # No explicit destination: ask the client for its tempdir first.
        self.CallClient("GetConfiguration", next_state="StoreTmpDir")
    else:
      self.CallStateInline(next_state="Filter")

  @flow.StateHandler(next_state=["CheckDiskFree"])
  def StoreTmpDir(self, responses):
    """Determine where the local copy will land so we can check free space."""
    # For local copy we need to know where the file will land to check for
    # disk free space there. The default is to leave this blank, which will
    # cause the client to use Client.tempdir
    if not responses.success:
      raise flow.FlowError("Couldn't get client config: %s." % responses.status)

    self.state.destdir = responses.First().get("Client.tempdir")

    if not self.state.destdir:
      # This means Client.tempdir wasn't explicitly defined in the client
      # config, so we use the current server value with the right context for
      # the client instead. This may differ from the default value deployed
      # with the client, but it's a fairly safe bet.
      self.state.destdir = config_lib.CONFIG.Get(
          "Client.tempdir",
          context=GetClientContext(self.client_id, self.token))

      if not self.state.destdir:
        # Previously this message carried an unfilled "%s" placeholder.
        raise flow.FlowError("Couldn't determine Client.tempdir file "
                             "destination, required for disk free check.")

      self.Log("Couldn't get Client.tempdir from client for disk space check,"
               "guessing %s from server config", self.state.destdir)

    self.CallFlow("DiskVolumeInfo",
                  path_list=[self.state.destdir],
                  next_state="CheckDiskFree")

  @flow.StateHandler(next_state=["Filter"])
  def CheckDiskFree(self, responses):
    """Abort early if the destination volume can't hold the raw image."""
    if not responses.success or not responses.First():
      raise flow.FlowError(
          "Couldn't determine disk free space for path %s" %
          self.args.action.download.dump_option.with_local_copy.destdir)

    free_space = responses.First().FreeSpaceBytes()
    mem_size = sum(run.length for run in self.state.memory_information.runs)

    if free_space < mem_size:
      # We expect that with compression the disk required will be significantly
      # less, so this ensures there will still be some left once we are done.
      raise flow.FlowError("Free space may be too low for local copy. Free "
                           "space on volume %s is %s bytes. Mem size is: %s "
                           "bytes. Override with check_disk_free_space=False."
                           % (self.state.destdir, free_space, mem_size))

    self.CallStateInline(next_state="Filter")

  @flow.StateHandler(next_state=["Action"])
  def Filter(self, responses):
    """Apply any user-supplied conditions via FileFinder over the device."""
    if self.args.conditions:
      self.CallFlow("FileFinder",
                    paths=[self.state.memory_information.device.path],
                    pathtype=rdf_paths.PathSpec.PathType.MEMORY,
                    conditions=self.ConditionsToFileFinderConditions(
                        self.args.conditions),
                    next_state="Action")
    else:
      self.CallStateInline(next_state="Action")

  @property
  def action_options(self):
    """The option message matching the selected action type (or None)."""
    if self.args.action.action_type == MemoryCollectorAction.Action.DOWNLOAD:
      return self.args.action.download
    elif (self.args.action.action_type ==
          MemoryCollectorAction.Action.SEND_TO_SOCKET):
      return self.args.action.send_to_socket

  @flow.StateHandler(next_state=["Transfer"])
  def Action(self, responses):
    """Report condition matches and start the configured action."""
    if not responses.success:
      raise flow.FlowError(
          "Applying conditions failed: %s" % (responses.status))

    if self.args.conditions:
      if not responses:
        self.Status("Memory doesn't match specified conditions.")
        return
      # Every matching buffer becomes a flow result.
      for response in responses:
        for match in response.matches:
          self.SendReply(match)

    if self.action_options:
      if (self.action_options.dump_option.option_type ==
          MemoryCollectorDumpOption.Option.WITHOUT_LOCAL_COPY):
        self.CallStateInline(next_state="Transfer")
      elif (self.action_options.dump_option.option_type ==
            MemoryCollectorDumpOption.Option.WITH_LOCAL_COPY):
        # Snapshot memory to a local (optionally gzipped) file first.
        dump_option = self.action_options.dump_option.with_local_copy
        self.CallClient("CopyPathToFile",
                        offset=dump_option.offset,
                        length=dump_option.length,
                        src_path=self.state.memory_information.device,
                        dest_dir=dump_option.destdir,
                        gzip_output=dump_option.gzip,
                        next_state="Transfer")
    else:
      self.Status("Nothing to do: no action specified.")

  @flow.StateHandler(next_state=["Done"])
  def Transfer(self, responses):
    """Move the image (or the local snapshot of it) to its destination."""
    # We can only get a failure if Transfer is called from CopyPathToFile
    if not responses.success:
      raise flow.FlowError("Local copy failed: %s" % (responses.status))

    if (self.action_options.dump_option.option_type ==
        MemoryCollectorDumpOption.Option.WITH_LOCAL_COPY):
      self.state.Register("memory_src_path", responses.First().dest_path)
    else:
      self.state.Register("memory_src_path",
                          self.state.memory_information.device)

    if self.args.action.action_type == MemoryCollectorAction.Action.DOWNLOAD:
      self.CallFlow("GetFile", pathspec=self.state.memory_src_path,
                    next_state="Done")
    elif (self.args.action.action_type ==
          MemoryCollectorAction.Action.SEND_TO_SOCKET):
      # Use self.args like every other access in this class (previously
      # reached through self.state.args).
      options = self.args.action.send_to_socket
      self.CallClient("SendFile", key=utils.SmartStr(options.key),
                      iv=utils.SmartStr(options.iv),
                      pathspec=self.state.memory_src_path,
                      address_family=options.address_family,
                      host=options.host,
                      port=options.port,
                      next_state="Done")

  @flow.StateHandler(next_state=["DeleteFile"])
  def Done(self, responses):
    """'Done' state always gets executed after 'Transfer' state is done."""
    if not responses.success:
      # Leave file on disk to allow the user to retry GetFile without having to
      # copy the whole memory image again.
      raise flow.FlowError("Transfer of %s failed %s" % (
          self.state.memory_src_path, responses.status))

    if self.args.action.action_type == MemoryCollectorAction.Action.DOWNLOAD:
      stat = responses.First()
      self.state.Register("downloaded_file", stat.aff4path)
      self.Log("Downloaded %s successfully." % self.state.downloaded_file)
      self.Notify("ViewObject", self.state.downloaded_file,
                  "Memory image transferred successfully")
      self.Status("Memory image transferred successfully.")
      self.SendReply(stat)
    elif (self.args.action.action_type ==
          MemoryCollectorAction.Action.SEND_TO_SOCKET):
      self.Status("Memory image transferred successfully.")

    if (self.action_options.dump_option.option_type ==
        MemoryCollectorDumpOption.Option.WITH_LOCAL_COPY):
      # Clean up the temporary snapshot now that the transfer succeeded.
      self.CallClient("DeleteGRRTempFiles",
                      self.state.memory_src_path, next_state="DeleteFile")

  @flow.StateHandler()
  def DeleteFile(self, responses):
    """Checks for errors from DeleteGRRTempFiles called from 'Done' state."""
    if not responses.success:
      raise flow.FlowError("Removing local file %s failed: %s" % (
          self.state.memory_src_path, responses.status))
class DownloadMemoryImageArgs(rdf_structs.RDFProtoStruct):
  """Arguments for the deprecated DownloadMemoryImage flow."""
  protobuf = flows_pb2.DownloadMemoryImageArgs
class DownloadMemoryImage(flow.GRRFlow):
  """Copy memory image to local disk then retrieve the file.

  DEPRECATED.
  This flow is now deprecated in favor of MemoryCollector. Please use
  MemoryCollector without conditions with "Download" action. You can
  set "dump option" to "create local copy first" or "don't create local copy".

  Returns to parent flow:
    A rdfvalue.CopyPathToFileRequest.
  """
  category = "/Memory/"
  args_type = DownloadMemoryImageArgs

  # This flow is also a basic flow.
  behaviours = flow.GRRFlow.behaviours + "ADVANCED"

  @classmethod
  def GetDefaultArgs(cls, token=None):
    """Default to fetching at most 1 TB of memory."""
    _ = token
    result = cls.args_type()
    result.length = (1024 ** 4)  # 1 TB

    return result

  @flow.StateHandler(next_state="PrepareImage")
  def Start(self):
    # Ensure a memory driver is loaded before touching the device.
    self.CallFlow("LoadMemoryDriver",
                  driver_installer=self.args.driver_installer,
                  next_state="PrepareImage")

  @flow.StateHandler(next_state=["DownloadFile", "Done"])
  def PrepareImage(self, responses):
    """Either snapshot memory to a local file or fetch the device directly."""
    if not responses.success:
      raise flow.FlowError("Failed due to no memory driver.")

    memory_information = responses.First()
    if self.args.make_local_copy:
      # Snapshot (optionally gzipped) to local disk first; the snapshot is
      # fetched in DownloadFile and cleaned up in DeleteFile.
      self.CallClient("CopyPathToFile",
                      offset=self.args.offset,
                      length=self.args.length,
                      src_path=memory_information.device,
                      dest_dir=self.args.destdir,
                      gzip_output=self.args.gzip,
                      next_state="DownloadFile")
    else:
      # Stream straight from the memory device.
      self.CallFlow("GetFile", pathspec=memory_information.device,
                    next_state="Done")

  @flow.StateHandler(next_state="DeleteFile")
  def DownloadFile(self, responses):
    """Fetch the local snapshot produced by CopyPathToFile."""
    if not responses.success:
      raise flow.FlowError(
          "Error copying memory to file: %s." % responses.status)

    self.state.Register("dest_path", responses.First().dest_path)
    self.CallFlow("GetFile", pathspec=self.state.dest_path,
                  next_state="DeleteFile")

  @flow.StateHandler(next_state="End")
  def DeleteFile(self, responses):
    """Delete the temporary file from disk."""
    if not responses.success:
      # Leave file on disk to allow the user to retry GetFile without having to
      # copy the whole memory image again.
      raise flow.FlowError("Transfer of %s failed %s" % (self.state.dest_path,
                                                         responses.status))

    stat = responses.First()
    self.SendReply(stat)
    self.state.Register("downloaded_file", stat.aff4path)
    self.Status("Downloaded %s successfully" % self.state.downloaded_file)
    self.CallClient("DeleteGRRTempFiles",
                    self.state.dest_path, next_state="End")

  @flow.StateHandler()
  def End(self, responses):
    """Surface cleanup failures and notify the user about the new image."""
    if not responses.success:
      raise flow.FlowError("Delete of %s failed %s" % (self.state.dest_path,
                                                       responses.status))
    self.Notify("ViewObject", self.state.downloaded_file,
                "Memory image transferred successfully")
class LoadMemoryDriverArgs(rdf_structs.RDFProtoStruct):
  """Arguments for the LoadMemoryDriver / UnloadMemoryDriver flows."""
  protobuf = flows_pb2.LoadMemoryDriverArgs
class LoadMemoryDriver(flow.GRRFlow):
  """Load a memory driver on the client.

  Note that AnalyzeClientMemory will do this for you if you call it.
  """
  category = "/Memory/"
  args_type = LoadMemoryDriverArgs

  @flow.StateHandler(next_state=["LoadDriver", "CheckMemoryInformation"])
  def Start(self):
    """Check if driver is already loaded."""
    self.state.Register("device_urn", self.client_id.Add("devices/memory"))
    self.state.Register("installer_urns", [])
    self.state.Register("current_installer", None)

    if not self.args.driver_installer:
      # Fetch the driver installer from the data store.
      self.state.installer_urns = GetMemoryModules(self.client_id,
                                                   token=self.token)

      # Create a protobuf containing the request.
      if not self.state.installer_urns:
        raise IOError("Could not determine path for memory driver. No module "
                      "available for this platform.")

    if self.args.reload_if_loaded:
      self.CallStateInline(next_state="LoadDriver")
    else:
      # We just check for one of the drivers, assuming that they all use the
      # same device path.
      # NOTE(review): if args.driver_installer was supplied this list is
      # empty and indexing [0] would raise — confirm callers always pass
      # reload_if_loaded together with driver_installer.
      installer = GetDriverFromURN(self.state.installer_urns[0],
                                   token=self.token)

      self.CallClient("GetMemoryInformation",
                      rdf_paths.PathSpec(
                          path=installer.device_path,
                          pathtype=rdf_paths.PathSpec.PathType.MEMORY),
                      next_state="CheckMemoryInformation")

  @flow.StateHandler(next_state=["LoadDriver", "GotMemoryInformation"])
  def CheckMemoryInformation(self, responses):
    """Check if the driver is loaded and responding."""
    if responses.success:
      # Memory driver exists, send reply directly to GotMemoryInformation.
      self.CallState(next_state="GotMemoryInformation",
                     messages=[responses.First()])
    else:
      # Driver needs loading.
      self.CallState(next_state="LoadDriver")

  @flow.StateHandler(next_state=["InstalledDriver"])
  def LoadDriver(self, _):
    """Send the next candidate driver to the client for installation."""
    if not self.state.installer_urns:
      raise flow.FlowError("Could not find a working memory driver")

    installer_urn = self.state.installer_urns.pop(0)
    installer = GetDriverFromURN(installer_urn, token=self.token)
    self.state.current_installer = installer

    # We want to force unload old driver and reload the current one.
    installer.force_reload = 1
    self.CallClient("InstallDriver", installer, next_state="InstalledDriver")

  @flow.StateHandler(next_state=["GotMemoryInformation", "LoadDriver",
                                 "InstalledDriver"])
  def InstalledDriver(self, responses):
    """Query the freshly installed driver, or fall back to the next one."""
    if not responses.success:
      # This driver didn't work, let's try the next one.
      self.CallStateInline(next_state="LoadDriver")
      # Bugfix: without this return we would also query the device of the
      # driver that just failed to install, and the resulting error in
      # GotMemoryInformation would abort the whole flow before the retry.
      return

    self.CallClient("GetMemoryInformation",
                    rdf_paths.PathSpec(
                        path=self.state.current_installer.device_path,
                        pathtype=rdf_paths.PathSpec.PathType.MEMORY),
                    next_state="GotMemoryInformation")

  @flow.StateHandler()
  def GotMemoryInformation(self, responses):
    """Confirm the driver initialized and add it to the VFS."""
    if responses.success:
      response = responses.First()

      fd = aff4.FACTORY.Create(self.state.device_urn, "MemoryImage",
                               token=self.token)
      layout = fd.Schema.LAYOUT(response)
      fd.Set(fd.Schema.PATHSPEC(response.device))
      fd.Set(layout)
      fd.Close()

      # Let a parent flow know which driver was installed.
      self.SendReply(layout)
    else:
      raise flow.FlowError("Failed to query device %s (%s)" %
                           (self.state.current_installer.device_path,
                            responses.status))

  @flow.StateHandler()
  def End(self):
    if self.state.context.state != rdf_flows.Flow.State.ERROR:
      self.Notify("ViewObject", self.state.device_urn,
                  "Driver successfully initialized.")
def GetClientContext(client_id, token):
  """Build a configuration context list for the given client.

  The context captures the client's platform, OS release and architecture so
  that configuration lookups resolve the same way they would on the client.

  Args:
    client_id: The client_id of the host to use.
    token: Token to use for access.

  Returns:
    array of client_context strings
  """
  context = []
  client_fd = aff4.FACTORY.Open(client_id, token=token)

  platform = client_fd.Get(client_fd.Schema.SYSTEM)
  if platform:
    context.append("Platform:%s" % platform)

  os_release = client_fd.Get(client_fd.Schema.OS_RELEASE)
  if os_release:
    context.append(utils.SmartStr(os_release))

  architecture = utils.SmartStr(client_fd.Get(client_fd.Schema.ARCH)).lower()
  if architecture == "x86":
    # "x86" is a synonym for i386.
    architecture = "i386"
  if architecture:
    context.append("Arch:%s" % architecture)

  return context
def GetMemoryModules(client_id, token):
  """Given a host, returns a list of urns to appropriate memory modules.

  Args:
    client_id: The client_id of the host to use.
    token: Token to use for access.

  Returns:
    A list of URNs pointing to GRRSignedDriver objects.

  Raises:
    IOError: on inability to get any driver.

  The driver is retrieved from the AFF4 configuration space according to the
  client's known attributes. The exact layout of the driver's configuration
  space structure is determined by the configuration system.

  The driver we are sending should have a signature associated with it. This
  would get verified by the client (independently of OS driver signing). Having
  this mechanism will allow for offline signing of drivers to reduce the risk of
  the system being used to deploy evil things.

  Since the client itself will verify the signature of the client, on the server
  we must retrieve the corresponding private keys to the public key that the
  client has. If the keys depend on the client's architecture, and operating
  system, the configuration system will give the client different keys depending
  on its operating system or architecture. In this case we need to match these
  keys, and retrieve the correct keys.

  For example, the configuration file can specify different keys for windows and
  OSX clients:

  Platform:Windows:
    PrivateKeys.driver_signing_private_key: |
      .... Key 1 .... (Private)

    Client.driver_signing_public_key: |
      .... Key 1 .... (Public)

    Arch:amd64:
      MemoryDriver.aff4_paths:
        - aff4:/config/drivers/windows/pmem_amd64.sys

    Arch:i386:
      MemoryDriver.aff4_paths:
        - aff4:/config/drivers/windows/pmem_x86.sys
  """
  candidate_paths = config_lib.CONFIG.Get(
      "MemoryDriver.aff4_paths", context=GetClientContext(client_id, token))

  usable_urns = []
  for candidate in candidate_paths:
    logging.debug("Will fetch driver at %s for client %s",
                  candidate, client_id)
    if not GetDriverFromURN(candidate, token):
      logging.debug("Unable to load driver at %s.", candidate)
      continue
    logging.debug("Driver at %s found.", candidate)
    usable_urns.append(candidate)

  if not usable_urns:
    raise IOError("Unable to find a driver for client.")

  return usable_urns
def GetDriverFromURN(urn, token=None):
  """Return a ready-to-send driver installer for a driver URN, or None."""
  try:
    driver_fd = aff4.FACTORY.Open(urn, aff4_type="GRRMemoryDriver",
                                  mode="r", token=token)
    # The AFF4 object yields signed driver blobs; pair the first blob that
    # has installation instructions with those instructions.
    for signed_blob in driver_fd:
      installer = driver_fd.Get(driver_fd.Schema.INSTALLATION)
      if installer:
        installer.driver = signed_blob
        return installer
  except IOError:
    # Missing or unreadable driver object; callers treat None as "no driver".
    pass

  return None
class AnalyzeClientMemoryArgs(rdf_structs.RDFProtoStruct):
  """Arguments for the AnalyzeClientMemory flow."""
  protobuf = flows_pb2.AnalyzeClientMemoryArgs
class AnalyzeClientMemory(flow.GRRFlow):
  """Runs client side analysis using Rekall.

  This flow takes a list of Rekall plugins to run. It first calls
  LoadMemoryDriver to ensure a Memory driver is loaded.
  It then sends the list of Rekall commands to the client. The client will
  run those plugins using the client's copy of Rekall.

  Each plugin will return its results and they will be stored in
  RekallResultCollections.
  """
  category = "/Memory/"
  args_type = AnalyzeClientMemoryArgs
  behaviours = flow.GRRFlow.behaviours + "BASIC"

  @flow.StateHandler(next_state=["RunPlugins", "KcoreStatResult",
                                 "StoreResults"])
  def Start(self, _):
    """Pick a memory source (given device, kcore, or driver) and start."""
    # Our output collection is a RekallResultCollection.
    if self.runner.output is not None:
      self.runner.output = aff4.FACTORY.Create(
          self.runner.output.urn, "RekallResponseCollection",
          mode="rw", token=self.token)

    self.state.Register("rekall_context_messages", {})
    self.state.Register("output_files", [])
    self.state.Register("plugin_errors", [])
    self.state.Register("rekall_request", self.args.request.Copy())

    if self.args.debug_logging:
      self.state.rekall_request.session[
          u"logging_level"] = u"DEBUG"

    # If a device is already provided, just use it.
    if self.state.rekall_request.device:
      self.CallClient("RekallAction", self.state.rekall_request,
                      next_state="StoreResults")
      return

    # If it is a linux client, check for kcore.
    client = aff4.FACTORY.Open(self.client_id, token=self.token)
    system = client.Get(client.Schema.SYSTEM)
    if self.args.use_kcore_if_present and system == "Linux":
      kcore_pathspec = rdf_paths.PathSpec(
          path="/proc/kcore",
          pathtype=rdf_paths.PathSpec.PathType.OS,
          # Devices are always outside the chroot so we specify this flag so
          # the client is able to locate it.
          is_virtualroot=True)
      self.CallClient("StatFile",
                      pathspec=kcore_pathspec,
                      next_state="KcoreStatResult")
      return

    # Fall back to installing/using the memory driver.
    self.CallFlow("LoadMemoryDriver", next_state="RunPlugins",
                  driver_installer=self.args.driver_installer)

  @flow.StateHandler(next_state=["StoreResults", "RunPlugins"])
  def KcoreStatResult(self, responses):
    """Use /proc/kcore if present, otherwise fall back to a memory driver."""
    if responses.success:
      self.state.rekall_request.device = responses.First().pathspec
      self.CallClient("RekallAction", self.state.rekall_request,
                      next_state="StoreResults")
    else:
      self.CallFlow("LoadMemoryDriver", next_state="RunPlugins",
                    driver_installer=self.args.driver_installer)

  @flow.StateHandler(next_state=["StoreResults"])
  def RunPlugins(self, responses):
    """Call the client with the Rekall actions."""
    if not responses.success:
      raise flow.FlowError("Unable to install memory driver.")

    memory_information = responses.First()
    # Update the device from the result of LoadMemoryDriver.
    self.state.rekall_request.device = memory_information.device
    self.CallClient("RekallAction", self.state.rekall_request,
                    next_state="StoreResults")

  @flow.StateHandler()
  def UpdateProfile(self, responses):
    """Log failures of WriteRekallProfile calls issued from StoreResults."""
    if not responses.success:
      self.Log(responses.status)

  @flow.StateHandler(next_state=["StoreResults", "UpdateProfile",
                                 "DeleteFiles"])
  def StoreResults(self, responses):
    """Stores the results."""
    if not responses.success:
      # Remember the error; End() raises once all responses are in.
      self.state.plugin_errors.append(unicode(responses.status.error_message))

    self.Log("Rekall returned %s responses." % len(responses))
    for response in responses:
      if response.missing_profile:
        # Fetch the profile the client asked for and push it down.
        server_type = config_lib.CONFIG["Rekall.profile_server"]
        logging.info("Getting missing Rekall profile '%s' from %s",
                     response.missing_profile, server_type)

        profile_server = rekall_profile_server.ProfileServer.classes[
            server_type]()

        profile = profile_server.GetProfileByName(
            response.missing_profile,
            version=response.repository_version
        )

        if profile:
          self.CallClient("WriteRekallProfile", profile,
                          next_state="UpdateProfile")
        else:
          self.Log("Needed profile %s not found! See "
                   "https://github.com/google/grr-doc/blob/master/"
                   "troubleshooting.adoc#missing-rekall-profiles",
                   response.missing_profile)

      if response.json_messages:
        response.client_urn = self.client_id
        if self.state.rekall_context_messages:
          response.json_context_messages = json.dumps(
              self.state.rekall_context_messages.items(),
              separators=(",", ":"))

        # Scan the Rekall message stream for context updates ("t"/"s"),
        # produced files ("file") and log records ("L").
        json_data = json.loads(response.json_messages)
        for message in json_data:
          if len(message) >= 1:
            if message[0] in ["t", "s"]:
              self.state.rekall_context_messages[message[0]] = message[1]

            if message[0] == "file":
              pathspec = rdf_paths.PathSpec(**message[1])
              self.state.output_files.append(pathspec)

            if message[0] == "L":
              if len(message) > 1:
                log_record = message[1]
                self.Log("%s:%s:%s", log_record["level"],
                         log_record["name"], log_record["msg"])

        self.SendReply(response)

    if responses.iterator.state != rdf_client.Iterator.State.FINISHED:
      # More results pending on the client - keep iterating.
      self.state.rekall_request.iterator = responses.iterator
      self.CallClient("RekallAction", self.state.rekall_request,
                      next_state="StoreResults")
    else:
      if self.state.output_files:
        self.Log("Getting %i files.", len(self.state.output_files))
        self.CallFlow("MultiGetFile", pathspecs=self.state.output_files,
                      next_state="DeleteFiles")

  @flow.StateHandler(next_state="LogDeleteFiles")
  def DeleteFiles(self, responses):
    """Remove the temporary files Rekall wrote, now that they're fetched."""
    # Check that the GetFiles flow worked.
    if not responses.success:
      raise flow.FlowError("Could not get files: %s" % responses.status)

    for output_file in self.state.output_files:
      self.CallClient("DeleteGRRTempFiles", output_file,
                      next_state="LogDeleteFiles")

  @flow.StateHandler()
  def LogDeleteFiles(self, responses):
    """Surface failures of the temp-file cleanup."""
    # Check that the DeleteFiles flow worked.
    if not responses.success:
      raise flow.FlowError("Could not delete file: %s" % responses.status)

  @flow.StateHandler()
  def End(self):
    """Raise accumulated plugin errors and notify about the results."""
    if self.state.plugin_errors:
      all_errors = u"\n".join([unicode(e) for e in self.state.plugin_errors])
      raise flow.FlowError("Error running plugins: %s" % all_errors)

    if self.runner.output is not None:
      self.Notify("ViewObject", self.runner.output.urn,
                  "Ran analyze client memory")
class UnloadMemoryDriver(LoadMemoryDriver):
  """Unloads a memory driver on the client."""
  category = "/Memory/"
  args_type = LoadMemoryDriverArgs

  @flow.StateHandler(next_state=["Done"])
  def Start(self):
    """Issue an UninstallDriver call for every applicable driver."""
    self.state.Register("driver_installer", self.args.driver_installer)
    self.state.Register("success", False)

    # An explicitly supplied installer takes precedence over discovery.
    if self.args.driver_installer:
      self.CallClient("UninstallDriver", self.state.driver_installer,
                      next_state="Done")
      return

    driver_urns = GetMemoryModules(self.client_id, self.token)
    if not driver_urns:
      raise IOError("No memory driver currently available for this system.")

    for driver_urn in driver_urns:
      module = GetDriverFromURN(driver_urn, token=self.token)
      self.CallClient("UninstallDriver", module, next_state="Done")

  @flow.StateHandler()
  def Done(self, responses):
    # One successful uninstall is enough for the flow to count as a success.
    if responses.success:
      self.state.success = True

  @flow.StateHandler()
  def End(self):
    if not self.state.success:
      raise flow.FlowError("Failed to uninstall memory driver.")
class ScanMemoryArgs(rdf_structs.RDFProtoStruct):
  """Arguments for the ScanMemory flow."""
  protobuf = flows_pb2.ScanMemoryArgs
class ScanMemory(flow.GRRFlow):
  """Grep client memory for a signature.

  This flow greps memory on the client for a pattern or a regex.

  Returns to parent flow:
      RDFValueArray of BufferReference objects.
  """
  category = "/Memory/"
  args_type = ScanMemoryArgs

  # XOR masks applied to the literal search term and to the returned hit
  # data, so the plain-text pattern never sits unmasked in the GRR client's
  # own memory (which would otherwise match itself).
  XOR_IN_KEY = 37
  XOR_OUT_KEY = 57

  @flow.StateHandler(next_state="Grep")
  def Start(self):
    """Record the XOR keys in the request and load the memory driver."""
    self.args.grep.xor_in_key = self.XOR_IN_KEY
    self.args.grep.xor_out_key = self.XOR_OUT_KEY
    self.CallFlow("LoadMemoryDriver", next_state="Grep")

  @flow.StateHandler(next_state="Done")
  def Grep(self, responses):
    """Run Grep on memory device pathspec."""
    if not responses.success:
      raise flow.FlowError("Error while loading memory driver: %s" %
                           responses.status.error_message)

    memory_information = responses.First()
    # Coerce the BareGrepSpec into a GrepSpec explicitly.
    grep_request = rdf_client.GrepSpec(target=memory_information.device,
                                       **self.args.grep.AsDict())

    # For literal matches we xor the search term. This stops us matching the GRR
    # client itself.
    if self.args.grep.literal:
      grep_request.literal = utils.Xor(
          utils.SmartStr(self.args.grep.literal), self.XOR_IN_KEY)

    self.CallClient("Grep", request=grep_request, next_state="Done")

  @flow.StateHandler(next_state="End")
  def Done(self, responses):
    """Decode hits and optionally kick off a full memory image download."""
    if responses.success:
      for hit in responses:
        # Decode the hit data from the client.
        hit.data = utils.Xor(hit.data, self.XOR_OUT_KEY)
        self.SendReply(hit)

      if self.args.also_download:
        self.CallFlow("DownloadMemoryImage", next_state="End")
    else:
      # Interpolate the status into the message; previously the format
      # string and the status were passed as two separate exception args,
      # so the message was never formatted.
      raise flow.FlowError("Error grepping memory: %s." % responses.status)
class ListVADBinariesArgs(rdf_structs.RDFProtoStruct):
  """Arguments for the ListVADBinaries flow."""
  protobuf = flows_pb2.ListVADBinariesArgs
class ListVADBinaries(flow.GRRFlow):
"""Get list of all running binaries from Rekall, (optionally) fetch them.
This flow executes the "vad" Rekall plugin to get the list of all
currently running binaries (including dynamic libraries). Then if
fetch_binaries option is set to True, it fetches all the binaries it has
found.
There is a caveat regarding using the "vad" plugin to detect currently
running executable binaries. The "Filename" member of the _FILE_OBJECT
struct is not reliable:
* Usually it does not include volume information: i.e.
\\Windows\\some\\path. Therefore it's impossible to detect the actual
volume where the executable is located.
* If the binary is executed from a shared network volume, the Filename
attribute is not descriptive enough to easily fetch the file.
* If the binary is executed directly from a network location (without
mounting the volume) Filename attribute will contain yet another
form of path.
* Filename attribute is not actually used by the system (it's probably
there for debugging purposes). It can be easily overwritten by a rootkit
without any noticeable consequences for the running system, but breaking
our functionality as a result.
Therefore this plugin's functionality is somewhat limited. Basically, it
won't fetch binaries that are located on non-default volumes.
Possible workaround (future work):
* Find a way to map given address space into the filename on the filesystem.
* Fetch binaries directly from memory by forcing page-ins first (via
some debug userland-process-dump API?) and then reading the memory.
"""
category = "/Memory/"
args_type = ListVADBinariesArgs
@flow.StateHandler(next_state="FetchBinaries")
def Start(self):
"""Request VAD data."""
if self.runner.output is not None:
self.runner.output.Set(self.runner.output.Schema.DESCRIPTION(
"GetProcessesBinariesRekall binaries (regex: %s) " %
self.args.filename_regex or "None"))
self.CallFlow("ArtifactCollectorFlow",
artifact_list=["FullVADBinaryList"],
store_results_in_aff4=False,
next_state="FetchBinaries")
@flow.StateHandler(next_state="HandleDownloadedFiles")
def FetchBinaries(self, responses):
    """Parses the Rekall response and initiates FileFinder flows."""
    # responses: VAD entries produced by the ArtifactCollectorFlow call
    # issued from Start().
    if not responses.success:
        self.Log("Error fetching VAD data: %s", responses.status)
        return
    self.Log("Found %d binaries", len(responses))
    if self.args.filename_regex:
        # Keep only entries whose collapsed path matches the user regex.
        binaries = []
        for response in responses:
            if self.args.filename_regex.Match(response.CollapsePath()):
                binaries.append(response)
        self.Log("Applied filename regex. Have %d files after filtering.",
                 len(binaries))
    else:
        binaries = responses
    if self.args.fetch_binaries:
        # Fetch the binaries from disk with a single FileFinder flow; its
        # results are processed in HandleDownloadedFiles.
        self.CallFlow(
            "FileFinder",
            next_state="HandleDownloadedFiles",
            paths=[rdf_paths.GlobExpression(b.CollapsePath()) for b in binaries],
            pathtype=rdf_paths.PathSpec.PathType.OS,
            action=file_finder.FileFinderAction(
                action_type=file_finder.FileFinderAction.Action.DOWNLOAD))
    else:
        # Not fetching: just report the (filtered) VAD entries to the caller.
        for b in binaries:
            self.SendReply(b)
@flow.StateHandler()
def HandleDownloadedFiles(self, responses):
    """Report each downloaded binary, or log the FileFinder failure."""
    if not responses.success:
        self.Log("Binaries download failed: %s", responses.status)
        return
    for result in responses:
        stat_entry = result.stat_entry
        self.SendReply(stat_entry)
        self.Log("Downloaded %s", stat_entry.pathspec.CollapsePath())
|
graphite-project/graphite-web | refs/heads/master | webapp/tests/test_storage.py | 5 | import os
import random
import shutil
import time
import whisper
from django.conf import settings
from django.test import override_settings
from mock import patch, Mock
from .base import TestCase
from graphite.finders.utils import BaseFinder
from graphite.intervals import Interval, IntervalSet
from graphite.node import LeafNode, BranchNode
from graphite.readers.utils import BaseReader
from graphite.storage import Store, extractForwardHeaders, get_finders, get_tagdb, write_index
from graphite.tags.localdatabase import LocalDatabaseTagDB
from graphite.worker_pool.pool import PoolTimeoutError
from graphite.render.datalib import TimeSeries
from graphite.render.evaluator import evaluateTarget
from graphite.util import epoch_to_dt
class StorageTest(TestCase):
def test_fetch(self):
disabled_finder = get_finders('tests.test_storage.DisabledFinder')[0]
legacy_finder = get_finders('tests.test_storage.LegacyFinder')[0]
test_finder = get_finders('tests.test_storage.TestFinder')[0]
remote_finder = get_finders('tests.test_storage.RemoteFinder')[0]
store = Store(
finders=[disabled_finder, legacy_finder, test_finder, remote_finder],
tagdb=get_tagdb('graphite.tags.localdatabase.LocalDatabaseTagDB')
)
# tagb is properly initialized
self.assertIsInstance(store.tagdb, LocalDatabaseTagDB)
# get all enabled finders
finders = store.get_finders()
self.assertEqual(list(finders), [legacy_finder, test_finder, remote_finder])
# get only local finders
finders = store.get_finders(local=True)
self.assertEqual(list(finders), [legacy_finder, test_finder])
# fetch with empty patterns
result = store.fetch([], 1, 2, 3, {})
self.assertEqual(result, [])
# fetch
result = store.fetch(['a.**'], 1, 2, 3, {})
self.assertEqual(len(result), 3)
result.sort(key=lambda node: node['name'])
self.assertEqual(result[0]['name'], 'a.b.c.d')
self.assertEqual(result[0]['pathExpression'], 'a.**')
self.assertEqual(result[1]['name'], 'a.b.c.d')
self.assertEqual(result[1]['pathExpression'], 'a.**')
self.assertEqual(result[2]['name'], 'a.b.c.e')
self.assertEqual(result[2]['pathExpression'], 'a.**')
def test_fetch_pool_timeout(self):
# pool timeout
store = Store(
finders=[RemoteFinder()]
)
def mock_pool_exec(pool, jobs, timeout):
raise PoolTimeoutError()
message = r'Timed out after [-.e0-9]+s for fetch for \[\'a\'\]'
with patch('graphite.storage.pool_exec', mock_pool_exec):
with patch('graphite.storage.log.info') as log_info:
with self.assertRaisesRegexp(Exception, message):
list(store.fetch(['a'], 1, 2, 3, {}))
self.assertEqual(log_info.call_count, 1)
self.assertRegexpMatches(log_info.call_args[0][0], message)
def test_fetch_all_failed(self):
# all finds failed
store = Store(
finders=[TestFinder()]
)
message = r'All requests failed for fetch for \[\'a\'\] \(1\)'
with patch('graphite.storage.log.info') as log_info:
with self.assertRaisesRegexp(Exception, message):
list(store.fetch(['a'], 1, 2, 3, {}))
self.assertEqual(log_info.call_count, 1)
self.assertRegexpMatches(
log_info.call_args[0][0],
r'Exception during fetch for \[\'a\'\] after [-.e0-9]+s: TestFinder.find_nodes'
)
store = Store(
finders=[TestFinder(), TestFinder()]
)
message = r'All requests failed for fetch for \[\'a\'\] \(2\)'
with patch('graphite.storage.log.info') as log_info:
with self.assertRaisesRegexp(Exception, message):
list(store.fetch(['a'], 1, 2, 3, {}))
self.assertEqual(log_info.call_count, 2)
self.assertRegexpMatches(
log_info.call_args[0][0],
r'Exception during fetch for \[\'a\'\] after [-.e0-9]+s: TestFinder.find_nodes'
)
def test_fetch_some_failed(self):
# some finders failed
store = Store(
finders=[TestFinder(), RemoteFinder()]
)
with patch('graphite.storage.log.info') as log_info:
list(store.fetch(['a'], 1, 2, 3, {}))
self.assertEqual(log_info.call_count, 1)
store = Store(
finders=[TestFinder(), TestFinder()]
)
message = r'All requests failed for fetch for \[\'a\'\] \(2\)'
with patch('graphite.storage.log.info') as log_info:
with self.assertRaisesRegexp(Exception, message):
list(store.fetch(['a'], 1, 2, 3, {}))
self.assertEqual(log_info.call_count, 2)
self.assertRegexpMatches(
log_info.call_args[0][0],
r'Exception during fetch for \[\'a\'\] after [-.e0-9]+s: TestFinder.find_nodes'
)
@override_settings(STORE_FAIL_ON_ERROR=True)
def test_fetch_some_failed_hard_fail_enabled(self):
# all finds failed
store = Store(
finders=[TestFinder(), RemoteFinder()]
)
message = r'1 request\(s\) failed for fetch for \[\'a\'\] \(2\)'
with patch('graphite.storage.log.info') as log_info:
with self.assertRaisesRegexp(Exception, message):
list(store.fetch(['a'], 1, 2, 3, {}))
self.assertEqual(log_info.call_count, 1)
self.assertRegexpMatches(
log_info.call_args[0][0],
r'Exception during fetch for \[\'a\'\] after [-.e0-9]+s: TestFinder.find_nodes'
)
store = Store(
finders=[TestFinder(), TestFinder()]
)
message = r'All requests failed for fetch for \[\'a\'\] \(2\)'
with patch('graphite.storage.log.info') as log_info:
with self.assertRaisesRegexp(Exception, message):
list(store.fetch(['a'], 1, 2, 3, {}))
self.assertEqual(log_info.call_count, 2)
self.assertRegexpMatches(
log_info.call_args[0][0],
r'Exception during fetch for \[\'a\'\] after [-.e0-9]+s: TestFinder.find_nodes'
)
def test_find(self):
disabled_finder = DisabledFinder()
legacy_finder = LegacyFinder()
test_finder = TestFinder()
remote_finder = RemoteFinder()
store = Store(
finders=[disabled_finder, legacy_finder, test_finder, remote_finder],
tagdb=get_tagdb('graphite.tags.localdatabase.LocalDatabaseTagDB')
)
# find nodes
result = list(store.find('a'))
self.assertEqual(len(result), 5)
for node in result:
if node.path in ['a.b.c.d', 'a.b.c.e']:
self.assertIsInstance(node, LeafNode)
else:
self.assertIsInstance(node, BranchNode)
self.assertTrue(node.path in ['a', 'a.b', 'a.b.c'])
# find leaves only
result = list(store.find('a', leaves_only=True))
self.assertEqual(len(result), 2)
for node in result:
self.assertIsInstance(node, LeafNode)
self.assertTrue(node.path in ['a.b.c.d', 'a.b.c.e'])
# failure threshold
message = r'Query a yields too many results and failed \(failure threshold is 1\)'
with self.settings(METRICS_FIND_FAILURE_THRESHOLD=1):
with self.assertRaisesRegexp(Exception, message):
list(store.find('a'))
# warning threshold
with self.settings(METRICS_FIND_WARNING_THRESHOLD=1):
with patch('graphite.storage.log.warning') as log_warning:
list(store.find('a'))
self.assertEqual(log_warning.call_count, 1)
self.assertEqual(
log_warning.call_args[0][0],
'Query a yields large number of results up to 2 (warning threshold is 1)'
)
def test_find_pool_timeout(self):
# pool timeout
store = Store(
finders=[RemoteFinder()]
)
def mock_pool_exec(pool, jobs, timeout):
raise PoolTimeoutError()
message = r'Timed out after [-.e0-9]+s for find <FindQuery: a from \* until \*>'
with patch('graphite.storage.pool_exec', mock_pool_exec):
with patch('graphite.storage.log.info') as log_info:
with self.assertRaisesRegexp(Exception, message):
list(store.find('a'))
self.assertEqual(log_info.call_count, 1)
self.assertRegexpMatches(log_info.call_args[0][0], message)
def test_find_all_failed(self):
# all finds failed
store = Store(
finders=[TestFinder()]
)
message = r'All requests failed for find <FindQuery: a from \* until \*>'
with patch('graphite.storage.log.info') as log_info:
with self.assertRaisesRegexp(Exception, message):
list(store.find('a'))
self.assertEqual(log_info.call_count, 1)
self.assertRegexpMatches(
log_info.call_args[0][0],
r'Exception during find <FindQuery: a from \* until \*> after [-.e0-9]+s: TestFinder.find_nodes'
)
store = Store(
finders=[TestFinder(), TestFinder()]
)
with patch('graphite.storage.log.info') as log_info:
with self.assertRaisesRegexp(Exception, message):
list(store.find('a'))
self.assertEqual(log_info.call_count, 2)
self.assertRegexpMatches(
log_info.call_args[0][0],
r'Exception during find <FindQuery: a from \* until \*> after [-.e0-9]+s: TestFinder.find_nodes'
)
@override_settings(REMOTE_STORE_FORWARD_HEADERS=['X-Test1', 'X-Test2'])
def test_extractForwardHeaders(self):
    """Only configured forward headers present in the request are extracted."""
    class FakeRequest(object):
        # Django exposes incoming HTTP headers in META as HTTP_<NAME>.
        META = {
            'HTTP_X_TEST1': 'test',
        }

    extracted = extractForwardHeaders(FakeRequest())
    # X-Test2 is configured but absent from the request, so it is omitted.
    self.assertEqual(extracted, {'X-Test1': 'test'})
def test_get_index(self):
disabled_finder = DisabledFinder()
# use get_finders so legacy_finder is patched with get_index
legacy_finder = get_finders('tests.test_storage.LegacyFinder')[0]
test_finder = TestFinder()
remote_finder = RemoteFinder()
store = Store(
finders=[disabled_finder, legacy_finder, test_finder, remote_finder],
tagdb=get_tagdb('graphite.tags.localdatabase.LocalDatabaseTagDB')
)
# get index
result = store.get_index()
self.assertEqual(result, ['a.b.c.d', 'a.b.c.e'])
# get local index
result = store.get_index({'localOnly': True})
self.assertEqual(result, ['a.b.c.d'])
def test_get_index_pool_timeout(self):
# pool timeout
store = Store(
finders=[RemoteFinder()]
)
def mock_pool_exec(pool, jobs, timeout):
raise PoolTimeoutError()
with patch('graphite.storage.pool_exec', mock_pool_exec):
with patch('graphite.storage.log.info') as log_info:
with self.assertRaisesRegexp(Exception, 'Timed out after .*'):
store.get_index()
self.assertEqual(log_info.call_count, 1)
self.assertRegexpMatches(log_info.call_args[0][0], 'Timed out after [-.e0-9]+s')
def test_get_index_all_failed(self):
# all finders failed
store = Store(
finders=[TestFinder()]
)
with patch('graphite.storage.log.info') as log_info:
with self.assertRaisesRegexp(Exception, 'All requests failed for get_index'):
store.get_index()
self.assertEqual(log_info.call_count, 1)
self.assertRegexpMatches(
log_info.call_args[0][0],
'Exception during get_index after [-.e0-9]+s: TestFinder.find_nodes'
)
store = Store(
finders=[TestFinder(), TestFinder()]
)
with patch('graphite.storage.log.info') as log_info:
with self.assertRaisesRegexp(Exception, r'All requests failed for get_index \(2\)'):
store.get_index()
self.assertEqual(log_info.call_count, 2)
self.assertRegexpMatches(
log_info.call_args[0][0],
'Exception during get_index after [-.e0-9]+s: TestFinder.find_nodes'
)
@override_settings(USE_WORKER_POOL=False)
def test_fetch_tag_support(self):
class TestFinderTags(BaseFinder):
tags = True
def find_nodes(self, query):
pass
def fetch(self, patterns, start_time, end_time, now=None, requestContext=None):
if patterns != [
'seriesByTag("hello=tiger")',
'seriesByTag("name=notags")',
'seriesByTag("name=testtags")',
'testtags;hello=tiger']:
raise Exception('Unexpected patterns %s' % str(patterns))
return [
{
'pathExpression': 'testtags;hello=tiger',
'name': 'testtags;hello=tiger',
'time_info': (0, 60, 1),
'values': [],
},
{
'pathExpression': 'seriesByTag("hello=tiger")',
'name': 'testtags;hello=tiger',
'time_info': (0, 60, 1),
'values': [],
},
{
'pathExpression': 'seriesByTag("name=testtags")',
'name': 'testtags;hello=tiger',
'time_info': (0, 60, 1),
'values': [],
},
]
tagdb = Mock()
store = Store(
finders=[TestFinderTags()],
tagdb=tagdb
)
request_context = {
'startTime': epoch_to_dt(0),
'endTime': epoch_to_dt(60),
'now': epoch_to_dt(60),
}
with patch('graphite.render.datalib.STORE', store):
results = evaluateTarget(request_context, ['testtags;hello=tiger', 'seriesByTag("hello=tiger")', 'seriesByTag("name=testtags")', 'seriesByTag("name=notags")'])
self.assertEqual(results, [
TimeSeries('testtags;hello=tiger', 0, 60, 1, []),
TimeSeries('testtags;hello=tiger', 0, 60, 1, [], pathExpression='seriesByTag("hello=tiger")'),
TimeSeries('testtags;hello=tiger', 0, 60, 1, [], pathExpression='seriesByTag("name=testtags")'),
])
@override_settings(USE_WORKER_POOL=True)
def test_fetch_no_tag_support(self):
class TestFinderNoTags(BaseFinder):
tags = False
def find_nodes(self, query):
pass
def fetch(self, patterns, start_time, end_time, now=None, requestContext=None):
if patterns != ['notags;hello=tiger']:
raise Exception('Unexpected patterns %s' % str(patterns))
return [
{
'pathExpression': 'notags;hello=tiger',
'name': 'notags;hello=tiger',
'time_info': (0, 60, 1),
'values': [],
}
]
tagdb = Mock()
def mockFindSeries(exprs, requestContext=None):
self.assertEqual(requestContext, request_context)
if exprs == ('hello=tiger',) or exprs == ('name=notags',):
return ['notags;hello=tiger']
if exprs == ('name=testtags',):
return []
raise Exception('Unexpected exprs %s' % str(exprs))
tagdb.find_series.side_effect = mockFindSeries
store = Store(
finders=[TestFinderNoTags()],
tagdb=tagdb
)
with patch('graphite.render.datalib.STORE', store):
request_context = {
'startTime': epoch_to_dt(0),
'endTime': epoch_to_dt(60),
'now': epoch_to_dt(60),
}
results = evaluateTarget(request_context, ['notags;hello=tiger', 'seriesByTag("hello=tiger")', 'seriesByTag("name=testtags")', 'seriesByTag("name=notags")'])
self.assertEqual(tagdb.find_series.call_count, 3)
self.assertEqual(results, [
TimeSeries('notags;hello=tiger', 0, 60, 1, []),
TimeSeries('notags;hello=tiger', 0, 60, 1, [], pathExpression='seriesByTag("hello=tiger")'),
TimeSeries('notags;hello=tiger', 0, 60, 1, [], pathExpression='seriesByTag("name=notags")'),
])
def test_autocomplete(self):
test = self
class TestFinderTags(BaseFinder):
tags = True
def __init__(self, request_limit=100, request_context=None):
self.limit = request_limit
self.context = request_context or {}
def find_nodes(self, query):
pass
def auto_complete_tags(self, exprs, tagPrefix=None, limit=None, requestContext=None):
test.assertEqual(exprs, ['tag1=value1'])
test.assertEqual(tagPrefix, 'test')
test.assertEqual(limit, self.limit)
test.assertEqual(requestContext, self.context)
return ['testtags']
def auto_complete_values(self, exprs, tag, valuePrefix=None, limit=None, requestContext=None):
test.assertEqual(exprs, ['tag1=value1'])
test.assertEqual(tag, 'tag2')
test.assertEqual(valuePrefix, 'test')
test.assertEqual(limit, self.limit)
test.assertEqual(requestContext, self.context)
return ['testtags']
class TestFinderNoTags(BaseFinder):
tags = False
def find_nodes(self, query):
pass
class TestFinderTagsException(TestFinderTags):
def auto_complete_tags(self, exprs, tagPrefix=None, limit=None, requestContext=None):
raise Exception('TestFinderTagsException.auto_complete_tags')
def auto_complete_values(self, exprs, tag, valuePrefix=None, limit=None, requestContext=None):
raise Exception('TestFinderTagsException.auto_complete_values')
class TestFinderTagsTimeout(TestFinderTags):
def auto_complete_tags(self, exprs, tagPrefix=None, limit=None, requestContext=None):
time.sleep(0.1)
return ['testtags']
def auto_complete_values(self, exprs, tag, valuePrefix=None, limit=None, requestContext=None):
time.sleep(0.1)
return ['testtags']
def mockStore(finders, request_limit=100, request_context=None):
tagdb = Mock()
def mockAutoCompleteTags(exprs, tagPrefix=None, limit=None, requestContext=None):
self.assertEqual(exprs, ['tag1=value1'])
self.assertEqual(tagPrefix, 'test')
self.assertEqual(limit, request_limit)
self.assertEqual(requestContext, request_context or {})
return ['testnotags']
tagdb.auto_complete_tags.side_effect = mockAutoCompleteTags
def mockAutoCompleteValues(exprs, tag, valuePrefix=None, limit=None, requestContext=None):
self.assertEqual(exprs, ['tag1=value1'])
self.assertEqual(tag, 'tag2')
self.assertEqual(valuePrefix, 'test')
self.assertEqual(limit, request_limit)
self.assertEqual(requestContext, request_context or {})
return ['testnotags']
tagdb.auto_complete_values.side_effect = mockAutoCompleteValues
return Store(
finders=finders,
tagdb=tagdb,
)
request_context = {}
# test with both tag-enabled and non-tag-enabled finders
store = mockStore([TestFinderTags(), TestFinderNoTags()])
result = store.tagdb_auto_complete_tags(['tag1=value1'], 'test', 100, request_context)
self.assertEqual(store.tagdb.auto_complete_tags.call_count, 1)
self.assertEqual(result, ['testnotags', 'testtags'])
result = store.tagdb_auto_complete_values(['tag1=value1'], 'tag2', 'test', 100, request_context)
self.assertEqual(store.tagdb.auto_complete_values.call_count, 1)
self.assertEqual(result, ['testnotags', 'testtags'])
# test with no limit & no requestContext
store = mockStore([TestFinderTags(None, {}), TestFinderNoTags()], None, {})
result = store.tagdb_auto_complete_tags(['tag1=value1'], 'test')
self.assertEqual(store.tagdb.auto_complete_tags.call_count, 1)
self.assertEqual(result, ['testnotags', 'testtags'])
result = store.tagdb_auto_complete_values(['tag1=value1'], 'tag2', 'test')
self.assertEqual(store.tagdb.auto_complete_values.call_count, 1)
self.assertEqual(result, ['testnotags', 'testtags'])
# test with only tag-enabled finder
store = mockStore([TestFinderTags()])
result = store.tagdb_auto_complete_tags(['tag1=value1'], 'test', 100, request_context)
self.assertEqual(store.tagdb.auto_complete_tags.call_count, 0)
self.assertEqual(result, ['testtags'])
result = store.tagdb_auto_complete_values(['tag1=value1'], 'tag2', 'test', 100, request_context)
self.assertEqual(store.tagdb.auto_complete_values.call_count, 0)
self.assertEqual(result, ['testtags'])
# test with only non-tag-enabled finder
store = mockStore([TestFinderNoTags()])
result = store.tagdb_auto_complete_tags(['tag1=value1'], 'test', 100, request_context)
self.assertEqual(store.tagdb.auto_complete_tags.call_count, 1)
self.assertEqual(result, ['testnotags'])
result = store.tagdb_auto_complete_values(['tag1=value1'], 'tag2', 'test', 100, request_context)
self.assertEqual(store.tagdb.auto_complete_values.call_count, 1)
self.assertEqual(result, ['testnotags'])
# test with no finders
store = mockStore([])
result = store.tagdb_auto_complete_tags(['tag1=value1'], 'test', 100, request_context)
self.assertEqual(store.tagdb.auto_complete_tags.call_count, 0)
self.assertEqual(result, [])
result = store.tagdb_auto_complete_values(['tag1=value1'], 'tag2', 'test', 100, request_context)
self.assertEqual(store.tagdb.auto_complete_values.call_count, 0)
self.assertEqual(result, [])
# test exception handling with one finder
store = mockStore([TestFinderTagsException()])
with self.assertRaisesRegexp(Exception, r'All requests failed for tags for \[\'tag1=value1\'\] test.*'):
store.tagdb_auto_complete_tags(['tag1=value1'], 'test', 100, request_context)
with self.assertRaisesRegexp(Exception, r'All requests failed for values for \[\'tag1=value1\'\] tag2 test.*'):
store.tagdb_auto_complete_values(['tag1=value1'], 'tag2', 'test', 100, request_context)
# test exception handling with more than one finder
store = mockStore([TestFinderTagsException(), TestFinderTagsException()])
with self.assertRaisesRegexp(Exception, r'All requests failed for tags for \[\'tag1=value1\'\] test'):
store.tagdb_auto_complete_tags(['tag1=value1'], 'test', 100, request_context)
with self.assertRaisesRegexp(Exception, r'All requests failed for values for \[\'tag1=value1\'\] tag2 test'):
store.tagdb_auto_complete_values(['tag1=value1'], 'tag2', 'test', 100, request_context)
# test pool timeout handling
store = mockStore([TestFinderTagsTimeout()])
with self.settings(USE_WORKER_POOL=True, FIND_TIMEOUT=0):
with self.assertRaisesRegexp(Exception, r'Timed out after [-.e0-9]+s for tags for \[\'tag1=value1\'\]'):
store.tagdb_auto_complete_tags(['tag1=value1'], 'test', 100, request_context)
with self.assertRaisesRegexp(Exception, r'Timed out after [-.e0-9]+s for values for \[\'tag1=value1\'\] tag2 test'):
store.tagdb_auto_complete_values(['tag1=value1'], 'tag2', 'test', 100, request_context)
# Fixture template for the write_index tests below; 'hostname' is
# substituted per worker in create_whisper_hosts().
hostcpu = os.path.join(settings.WHISPER_DIR, 'hosts/hostname/cpu.wsp')
def create_whisper_hosts(self):
    """Create two one-minute whisper files (worker1/worker2) plus a bogus
    non-whisper file, then write one datapoint into each whisper file."""
    worker1 = self.hostcpu.replace('hostname', 'worker1')
    worker2 = self.hostcpu.replace('hostname', 'worker2')
    bogus_file = os.path.join(settings.WHISPER_DIR, 'a/b/c/bogus_file.txt')
    try:
        os.makedirs(worker1.replace('cpu.wsp', ''))
        os.makedirs(worker2.replace('cpu.wsp', ''))
        os.makedirs(bogus_file.replace('bogus_file.txt', ''))
    except OSError:
        # Directories may already exist from a previous run; that's fine.
        pass
    open(bogus_file, 'a').close()
    whisper.create(worker1, [(1, 60)])
    whisper.create(worker2, [(1, 60)])
    ts = int(time.time())
    whisper.update(worker1, 1, ts)
    whisper.update(worker2, 2, ts)
def wipe_whisper_hosts(self):
    """Best-effort removal of everything create_whisper_hosts() made."""
    try:
        os.remove(self.hostcpu.replace('hostname', 'worker1'))
        os.remove(self.hostcpu.replace('hostname', 'worker2'))
        os.remove(os.path.join(settings.WHISPER_DIR, 'a/b/c/bogus_file.txt'))
        shutil.rmtree(self.hostcpu.replace('hostname/cpu.wsp', ''))
        shutil.rmtree(os.path.join(settings.WHISPER_DIR, 'a'))
    except OSError:
        # Files may already be gone if a test failed early; ignore.
        pass
def test_write_index(self):
    """write_index() completes (returns None) with and without an explicit file."""
    self.create_whisper_hosts()
    self.addCleanup(self.wipe_whisper_hosts)
    self.assertEqual(write_index(), None)
    self.assertEqual(write_index(settings.INDEX_FILE), None)
class DisabledFinder(object):
    """Finder fixture the store must skip: ``disabled`` is set."""
    disabled = True

    def find_nodes(self, query):
        """Never called by the store; yields nothing."""
        return None
class LegacyFinder(object):
    """Old-style (non-BaseFinder) fixture: a fixed branch chain plus one leaf."""

    def find_nodes(self, query):
        """Yield branches a, a.b, a.b.c, then the leaf a.b.c.d."""
        for branch_path in ('a', 'a.b', 'a.b.c'):
            yield BranchNode(branch_path)
        yield LeafNode('a.b.c.d', DummyReader('a.b.c.d'))
class DummyReader(BaseReader):
    """Reader fixture producing random values on a fixed 10-second step."""
    __slots__ = ('path',)

    def __init__(self, path):
        self.path = path

    def fetch(self, startTime, endTime, now=None, requestContext=None):
        """Return ((start, end, 10), values): one random point per 10s slot."""
        npoints = (endTime - startTime) // 10
        values = [random.choice([None, 1, 2, 3]) for _ in range(npoints)]
        return (startTime, endTime, 10), values

    def get_intervals(self):
        """Advertise the trailing hour as the available data window."""
        return IntervalSet([Interval(time.time() - 3600, time.time())])
class RemoteFinder(BaseFinder):
    """Non-local finder fixture: one branch and two leaves under a.b.c."""
    local = False

    def find_nodes(self, query):
        yield BranchNode('a.b.c')
        for leaf_path in ('a.b.c.d', 'a.b.c.e'):
            yield LeafNode(leaf_path, DummyReader(leaf_path))
class TestFinder(BaseFinder):
    """Finder fixture whose find_nodes always raises, to exercise the
    store's error-handling ("all requests failed") paths."""
    def find_nodes(self, query):
        raise Exception('TestFinder.find_nodes')
|
jasonseminara/OpenSourceFinal | refs/heads/master | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/util/connection.py | 679 | import socket
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
def is_connection_dropped(conn):  # Platform-specific
    """
    Returns True if the connection is dropped and should be closed.
    :param conn:
        :class:`httplib.HTTPConnection` object.
    Note: For platforms like AppEngine, this will always return ``False`` to
    let the platform handle connection recycling transparently for us.
    """
    # False is the "no sock attribute" sentinel, distinct from a real sock
    # of None (which means httplib already closed the connection).
    sock = getattr(conn, 'sock', False)
    if sock is False:  # Platform-specific: AppEngine
        return False
    if sock is None:  # Connection already closed (such as by httplib).
        return True
    if not poll:
        if not select:  # Platform-specific: AppEngine
            return False
        # Zero-timeout select: an idle keep-alive socket reporting readable
        # means EOF or stale data, i.e. the peer closed the connection.
        try:
            return select([sock], [], [], 0.0)[0]
        except socket.error:
            return True
    # This version is better on platforms that support it.
    p = poll()
    p.register(sock, POLLIN)
    for (fno, ev) in p.poll(0.0):
        if fno == sock.fileno():
            # Either data is buffered (bad), or the connection is dropped.
            return True
    # NOTE(review): healthy-socket case falls through and implicitly returns
    # None (falsy); callers only use the result in boolean context.
# This function is copied from socket.py in the Python 2.7 standard
# library test suite. Added to its signature is only `socket_options`.
def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                      source_address=None, socket_options=None):
    """Connect to *address* and return the socket object.

    *address* is a 2-tuple ``(host, port)``. The optional *timeout* is set on
    the socket before connecting; when omitted, the global default timeout
    from :func:`getdefaulttimeout` applies. If *source_address* is given it
    must be a ``(host, port)`` tuple to bind as the source address before
    connecting; '' or port 0 lets the OS choose. *socket_options* is an
    iterable of ``setsockopt`` argument tuples applied before connecting.
    """
    host, port = address
    last_error = None
    for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
        family, socktype, proto, _canonname, sockaddr = res
        sock = None
        try:
            sock = socket.socket(family, socktype, proto)
            # Apply caller-supplied socket-level options before connecting.
            # (This is the only addition urllib3 makes to the stdlib recipe.)
            if socket_options is not None:
                for opt in socket_options:
                    sock.setsockopt(*opt)
            if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sockaddr)
            return sock
        except socket.error as exc:
            # Remember the failure and try the next resolved address.
            last_error = exc
            if sock is not None:
                sock.close()
                sock = None
    if last_error is not None:
        raise last_error
    raise socket.error("getaddrinfo returns an empty list")
def _set_socket_options(sock, options):
if options is None:
return
for opt in options:
sock.setsockopt(*opt)
|
open-power/eCMD | refs/heads/master | mkscripts/makeext.py | 1 | #!/usr/bin/env python3
import os
import sys
import errno
# A script to create the files generated by the configured extensions
# Configured extensions come from the EXTENSIONS environment variable
# Create the ecmdExtInterpreter.C
# This is a combo of generated code from this script and code snippets
# stored with the extension
# -----------------------
# cmd files
# -----------------------
if (sys.argv[1] == "cmd"):
# Pull EXT_CMD out of the environment
extlist = os.environ["EXT_CMD"].split()
# Open our file and start writing
extfile = open(os.environ["SRCPATH"] + "/ecmdExtInterpreter.C", 'w')
# Write out all the static stuff
extfile.write("//This file was autogenerated by makeext.py\n\n")
extfile.write("#include <inttypes.h>\n")
extfile.write("#include <dlfcn.h>\n")
extfile.write("#include <stdio.h>\n")
extfile.write("#include <string.h>\n")
extfile.write("\n")
extfile.write("#include <ecmdClientCapi.H>\n")
extfile.write("#include <ecmdExtInterpreter.H>\n")
extfile.write("#include <ecmdReturnCodes.H>\n")
extfile.write("#include <ecmdCommandUtils.H>\n")
extfile.write("#include <ecmdSharedUtils.H>\n\n")
# Now loop through all the extensions and write out their defines and includes
for ext in extlist:
extfile.write("#ifdef ECMD_" + ext.upper() + "_EXTENSION_SUPPORT\n")
extfile.write("#include <" + ext + "ClientCapi.H>\n")
extfile.write("#include <" + ext + "Interpreter.H>\n")
extfile.write("#endif\n\n")
# Write the function definition
extfile.write("uint32_t ecmdCallExtInterpreters(int argc, char* argv[], uint32_t & io_rc) {\n")
extfile.write(" uint32_t rc = ECMD_SUCCESS;\n\n")
# Now go through and suck in all the extension init code snippets
# Once read in, place it in the new output file
for ext in extlist:
snippetfile = open(os.environ["EXT_" + ext + "_PATH"] + "/cmd/snippet/callInterpreter.C", 'r')
for line in snippetfile.readlines():
extfile.write(line)
extfile.write("\n")
snippetfile.close()
# Write the end of the function and close the file
extfile.write(" return rc;\n")
extfile.write("}\n")
extfile.close()
# -----------------------
# Doxygen files
# -----------------------
if (sys.argv[1] == "doxygen"):
# arg1 is doxygen
# arg2 is the api
# arg3 is where to write the file
# Pull the right EXT list out of the env based on arg2
extlist = list()
if (sys.argv[2] == "capi"):
extlist = os.environ["EXT_CAPI"].split()
elif (sys.argv[2] == "perlapi"):
extlist = os.environ["EXT_PERLAPI"].split()
elif (sys.argv[2] == "pyapi"):
extlist = os.environ["EXT_PYAPI"].split()
else:
print("Unknown API \"%s\" passed in! Exiting.." % (sys.argv[2]))
exit(1)
# We're going to write out a header file to be used by doxygen to get the extensions
# Open our file and start writing
extfile = open(sys.argv[3] + "/ecmdExt" + sys.argv[2].title() + ".H", 'w')
# Write out all the static stuff
extfile.write("/**\n\n")
extfile.write(" @file ecmdExt" + sys.argv[2].title() + ".H\n")
extfile.write(" @brief eCMD Extension Information\n\n")
extfile.write(" @section ext eCMD Extensions\n")
extfile.write(" These are extensions to the core eCMD interface, not all eCMD Plugins support these extensions.<br>\n")
extfile.write(" To use an eCMD extension you will need to link in the appropriate library, see the example Makefile on main page.<br>\n\n")
extfile.write("<ul>\n")
# Now loop through all the extensions and write out their includes
for ext in extlist:
extfile.write("<li> " + ext + "Client" + sys.argv[2].title() + ".H\n")
# Write the end of the file
extfile.write("</ul>\n\n")
extfile.write("*/")
extfile.close()
# -----------------------
# Python files
# -----------------------
if (sys.argv[1] == "pyapi"):
    # Pull EXT_PYAPI out of the environment
    extlist = os.environ["EXT_PYAPI"].split()
    # Write the SWIG include list: one %include per extension, each guarded
    # by its ECMD_<EXT>_EXTENSION_SUPPORT define.
    extfile = open(os.environ["SRCPATH"] + "/ecmdExtPyIncludes.i", 'w')
    extfile.write("/*********** Start Files to swigify ***********/\n")
    extfile.write("// The extensions\n")
    for ext in extlist:
        extfile.write("#ifdef ECMD_" + ext.upper() + "_EXTENSION_SUPPORT\n")
        extfile.write(" %include " + ext + "ClientPyapi.i\n")
        extfile.write("#endif\n")
    extfile.write("/*********** End Files to swigify ***********/\n")
    extfile.close()
    # Write the SWIG insert-code file: concatenate every extension's
    # extInsert.i snippet inside a %{ ... %} verbatim block.
    extfile = open(os.environ["SRCPATH"] + "/ecmdExtPyInserts.i", 'w')
    extfile.write("/*********** Start Insert Code ***********/\n")
    extfile.write("// Insert C code into the file swig generates\n")
    extfile.write("%{\n")
    for ext in extlist:
        snippetfile = open(os.environ["EXT_" + ext + "_PATH"] + "/pyapi/snippet/extInsert.i", 'r')
        for line in snippetfile.readlines():
            extfile.write(line)
        snippetfile.close()
    # Bug fix: the block was previously closed with "\%}\n". "\%" is not a
    # recognized Python escape, so the backslash was kept and the generated
    # file ended with a literal "\%}", which SWIG cannot parse. The %{
    # opener must be matched by a plain "%}".
    extfile.write("%}\n")
    extfile.close()
# -----------------------
# Perl files
# -----------------------
if (sys.argv[1] == "perlapi"):
    # Pull EXT_PERLAPI out of the environment
    extlist = os.environ["EXT_PERLAPI"].split()
    # Write the SWIG include list: one %include per extension, each guarded
    # by its ECMD_<EXT>_EXTENSION_SUPPORT define.
    extfile = open(os.environ["SRCPATH"] + "/ecmdExtPerlIncludes.i", 'w')
    extfile.write("/*********** Start Files to swigify ***********/\n")
    extfile.write("// The extensions\n")
    for ext in extlist:
        extfile.write("#ifdef ECMD_" + ext.upper() + "_EXTENSION_SUPPORT\n")
        extfile.write(" %include " + ext + "ClientPerlapi.i\n")
        extfile.write("#endif\n")
    extfile.write("/*********** End Files to swigify ***********/\n")
    extfile.close()
    # Write the SWIG insert-code file: concatenate every extension's
    # extInsert.i snippet inside a %{ ... %} verbatim block.
    extfile = open(os.environ["SRCPATH"] + "/ecmdExtPerlInserts.i", 'w')
    extfile.write("/*********** Start Insert Code ***********/\n")
    extfile.write("// Insert C code into the file swig generates\n")
    extfile.write("%{\n")
    for ext in extlist:
        snippetfile = open(os.environ["EXT_" + ext + "_PATH"] + "/perlapi/snippet/extInsert.i", 'r')
        for line in snippetfile.readlines():
            extfile.write(line)
        snippetfile.close()
    # Bug fix: previously wrote "\%}\n"; since "\%" is not a recognized
    # escape, a literal backslash landed in the generated file ("\%}"),
    # breaking SWIG's %{ ... %} block. Close with a plain "%}".
    extfile.write("%}\n")
    extfile.close()
|
ZuluPro/django-dbbackup | refs/heads/master | dbbackup/db/sqlite.py | 2 | from __future__ import unicode_literals
import warnings
from tempfile import SpooledTemporaryFile
from shutil import copyfileobj
from django.db import IntegrityError, OperationalError
from django.utils.six import BytesIO
from .base import BaseDBConnector
# SQL listing every user table's CREATE statement from the sqlite
# schema catalog, in the order sqlite3's ".dump" command would use.
DUMP_TABLES = """
SELECT "name", "type", "sql"
FROM "sqlite_master"
WHERE "sql" NOT NULL AND "type" == 'table'
ORDER BY "name"
"""
# SQL listing the remaining schema objects (indexes, triggers, views)
# so they can be recreated after the table data has been dumped.
DUMP_ETC = """
SELECT "name", "type", "sql"
FROM "sqlite_master"
WHERE "sql" NOT NULL AND "type" IN ('index', 'trigger', 'view')
"""
class SqliteConnector(BaseDBConnector):
    """
    Create a dump at SQL layer like could make ``.dumps`` in sqlite3.
    Restore by evaluate the created SQL.
    """
    def _write_dump(self, fileobj):
        """Write a UTF-8 encoded SQL dump of the database to ``fileobj``.

        ``fileobj`` must be opened in binary mode: every statement is
        encoded to UTF-8 before being written.  Tables listed in
        ``self.exclude`` and sqlite internal tables are skipped.
        """
        cursor = self.connection.cursor()
        cursor.execute(DUMP_TABLES)
        for table_name, type_, sql in cursor.fetchall():
            if table_name.startswith('sqlite_') or table_name in self.exclude:
                continue
            elif sql.startswith('CREATE TABLE'):
                # Make SQL commands in 1 line
                sql = sql.replace('\n ', '')
                sql = sql.replace('\n)', ')')
                fileobj.write("{};\n".format(sql).encode('UTF-8'))
            else:
                # BUG FIX: this branch wrote the raw str, unlike every
                # other write in this method -- on Python 3 that raises
                # TypeError against the binary dump file.
                fileobj.write("{};\n".format(sql).encode('UTF-8'))
            table_name_ident = table_name.replace('"', '""')
            res = cursor.execute('PRAGMA table_info("{0}")'.format(table_name_ident))
            column_names = [str(table_info[1]) for table_info in res.fetchall()]
            # Build one SELECT whose result rows are ready-made INSERT
            # statements (quote() makes each value SQL-safe).
            q = """SELECT 'INSERT INTO "{0}" VALUES({1})' FROM "{0}";\n""".format(
                table_name_ident,
                ",".join("""'||quote("{0}")||'""".format(col.replace('"', '""'))
                         for col in column_names))
            query_res = cursor.execute(q)
            for row in query_res:
                fileobj.write("{};\n".format(row[0]).encode('UTF-8'))
        # Recreate indexes/triggers/views after the data.
        schema_res = cursor.execute(DUMP_ETC)
        for name, type_, sql in schema_res.fetchall():
            if sql.startswith("CREATE INDEX"):
                sql = sql.replace('CREATE INDEX', 'CREATE INDEX IF NOT EXISTS')
            fileobj.write('{};\n'.format(sql).encode('UTF-8'))
        cursor.close()

    def create_dump(self):
        """Return a rewound binary file object containing the SQL dump.

        The dump is spooled in memory up to 10 MB, then spills to disk.
        """
        if not self.connection.is_usable():
            self.connection.connect()
        dump_file = SpooledTemporaryFile(max_size=10 * 1024 * 1024)
        self._write_dump(dump_file)
        dump_file.seek(0)
        return dump_file

    def restore_dump(self, dump):
        """Execute every line of ``dump`` (UTF-8 bytes) against the DB.

        Errors on individual statements are reported as warnings so a
        partially-applicable dump restores as much as possible.
        """
        if not self.connection.is_usable():
            self.connection.connect()
        cursor = self.connection.cursor()
        for line in dump.readlines():
            try:
                cursor.execute(line.decode('UTF-8'))
            except (OperationalError, IntegrityError) as err:
                warnings.warn("Error in db restore: {}".format(err))
class SqliteCPConnector(BaseDBConnector):
    """
    Create a dump by copy the binary data file.
    Restore by simply copy to the good location.
    """
    def create_dump(self):
        """Return an in-memory, rewound copy of the SQLite database file."""
        db_path = self.connection.settings_dict['NAME']
        with open(db_path, 'rb') as source:
            snapshot = BytesIO(source.read())
        snapshot.seek(0)
        return snapshot

    def restore_dump(self, dump):
        """Overwrite the SQLite database file with the content of ``dump``."""
        db_path = self.connection.settings_dict['NAME']
        with open(db_path, 'wb') as target:
            target.write(dump.read())
|
mmalyska/eve-wspace | refs/heads/develop | evewspace/staticdata_introspect.py | 27 | # This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# Feel free to rename the models, but don't rename db_table values or field names.
#
# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'
# into your database.
from django.db import models
class Agtagenttypes(models.Model):
    """Agent type lookup (static-data table ``agtAgentTypes``)."""
    agenttypeid = models.IntegerField(primary_key=True, db_column='agentTypeID') # Field name made lowercase.
    agenttype = models.CharField(max_length=150, db_column='agentType', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'agtAgentTypes'
class Agtagents(models.Model):
    """NPC agent records (static-data table ``agtAgents``)."""
    agentid = models.IntegerField(primary_key=True, db_column='agentID') # Field name made lowercase.
    divisionid = models.IntegerField(null=True, db_column='divisionID', blank=True) # Field name made lowercase.
    corporationid = models.IntegerField(null=True, db_column='corporationID', blank=True) # Field name made lowercase.
    locationid = models.IntegerField(null=True, db_column='locationID', blank=True) # Field name made lowercase.
    level = models.IntegerField(null=True, blank=True)
    quality = models.IntegerField(null=True, blank=True)
    agenttypeid = models.IntegerField(null=True, db_column='agentTypeID', blank=True) # Field name made lowercase.
    islocator = models.IntegerField(null=True, db_column='isLocator', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'agtAgents'
class Agtresearchagents(models.Model):
    """Research agents and the type they research (``agtResearchAgents``)."""
    agentid = models.IntegerField(primary_key=True, db_column='agentID') # Field name made lowercase.
    typeid = models.IntegerField(db_column='typeID') # Field name made lowercase.
    class Meta:
        db_table = u'agtResearchAgents'
class Chrancestries(models.Model):
    """Character ancestries with base attributes (``chrAncestries``)."""
    ancestryid = models.IntegerField(primary_key=True, db_column='ancestryID') # Field name made lowercase.
    ancestryname = models.CharField(max_length=300, db_column='ancestryName', blank=True) # Field name made lowercase.
    bloodlineid = models.IntegerField(null=True, db_column='bloodlineID', blank=True) # Field name made lowercase.
    description = models.CharField(max_length=3000, blank=True)
    perception = models.IntegerField(null=True, blank=True)
    willpower = models.IntegerField(null=True, blank=True)
    charisma = models.IntegerField(null=True, blank=True)
    memory = models.IntegerField(null=True, blank=True)
    intelligence = models.IntegerField(null=True, blank=True)
    iconid = models.IntegerField(null=True, db_column='iconID', blank=True) # Field name made lowercase.
    shortdescription = models.CharField(max_length=1500, db_column='shortDescription', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'chrAncestries'
class Chrattributes(models.Model):
    """Character attribute definitions (``chrAttributes``)."""
    attributeid = models.IntegerField(primary_key=True, db_column='attributeID') # Field name made lowercase.
    attributename = models.CharField(max_length=300, db_column='attributeName', blank=True) # Field name made lowercase.
    description = models.CharField(max_length=3000, blank=True)
    iconid = models.IntegerField(null=True, db_column='iconID', blank=True) # Field name made lowercase.
    shortdescription = models.CharField(max_length=1500, db_column='shortDescription', blank=True) # Field name made lowercase.
    notes = models.CharField(max_length=1500, blank=True)
    class Meta:
        db_table = u'chrAttributes'
class Chrbloodlines(models.Model):
    """Character bloodlines with descriptions and base stats (``chrBloodlines``)."""
    bloodlineid = models.IntegerField(primary_key=True, db_column='bloodlineID') # Field name made lowercase.
    bloodlinename = models.CharField(max_length=300, db_column='bloodlineName', blank=True) # Field name made lowercase.
    raceid = models.IntegerField(null=True, db_column='raceID', blank=True) # Field name made lowercase.
    description = models.CharField(max_length=3000, blank=True)
    maledescription = models.CharField(max_length=3000, db_column='maleDescription', blank=True) # Field name made lowercase.
    femaledescription = models.CharField(max_length=3000, db_column='femaleDescription', blank=True) # Field name made lowercase.
    shiptypeid = models.IntegerField(null=True, db_column='shipTypeID', blank=True) # Field name made lowercase.
    corporationid = models.IntegerField(null=True, db_column='corporationID', blank=True) # Field name made lowercase.
    perception = models.IntegerField(null=True, blank=True)
    willpower = models.IntegerField(null=True, blank=True)
    charisma = models.IntegerField(null=True, blank=True)
    memory = models.IntegerField(null=True, blank=True)
    intelligence = models.IntegerField(null=True, blank=True)
    iconid = models.IntegerField(null=True, db_column='iconID', blank=True) # Field name made lowercase.
    shortdescription = models.CharField(max_length=1500, db_column='shortDescription', blank=True) # Field name made lowercase.
    shortmaledescription = models.CharField(max_length=1500, db_column='shortMaleDescription', blank=True) # Field name made lowercase.
    shortfemaledescription = models.CharField(max_length=1500, db_column='shortFemaleDescription', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'chrBloodlines'
class Chrfactions(models.Model):
    """NPC factions (``chrFactions``)."""
    factionid = models.IntegerField(primary_key=True, db_column='factionID') # Field name made lowercase.
    factionname = models.CharField(max_length=300, db_column='factionName', blank=True) # Field name made lowercase.
    description = models.CharField(max_length=3000, blank=True)
    raceids = models.IntegerField(null=True, db_column='raceIDs', blank=True) # Field name made lowercase.
    solarsystemid = models.IntegerField(null=True, db_column='solarSystemID', blank=True) # Field name made lowercase.
    corporationid = models.IntegerField(null=True, db_column='corporationID', blank=True) # Field name made lowercase.
    sizefactor = models.FloatField(null=True, db_column='sizeFactor', blank=True) # Field name made lowercase.
    stationcount = models.IntegerField(null=True, db_column='stationCount', blank=True) # Field name made lowercase.
    stationsystemcount = models.IntegerField(null=True, db_column='stationSystemCount', blank=True) # Field name made lowercase.
    militiacorporationid = models.IntegerField(null=True, db_column='militiaCorporationID', blank=True) # Field name made lowercase.
    iconid = models.IntegerField(null=True, db_column='iconID', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'chrFactions'
class Chrraces(models.Model):
    """Playable races (``chrRaces``)."""
    raceid = models.IntegerField(primary_key=True, db_column='raceID') # Field name made lowercase.
    racename = models.CharField(max_length=300, db_column='raceName', blank=True) # Field name made lowercase.
    description = models.CharField(max_length=3000, blank=True)
    iconid = models.IntegerField(null=True, db_column='iconID', blank=True) # Field name made lowercase.
    shortdescription = models.CharField(max_length=1500, db_column='shortDescription', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'chrRaces'
class Crpactivities(models.Model):
    """Corporation activity lookup (``crpActivities``)."""
    activityid = models.IntegerField(primary_key=True, db_column='activityID') # Field name made lowercase.
    activityname = models.CharField(max_length=300, db_column='activityName', blank=True) # Field name made lowercase.
    description = models.CharField(max_length=3000, blank=True)
    class Meta:
        db_table = u'crpActivities'
class Crpnpccorporationdivisions(models.Model):
    """NPC corporation/division link (``crpNPCCorporationDivisions``)."""
    # NOTE(review): two fields declare primary_key=True, but Django has no
    # composite primary keys -- verify which column should be the real pk
    # (see the generator's header warning at the top of this module).
    corporationid = models.IntegerField(primary_key=True, db_column='corporationID') # Field name made lowercase.
    divisionid = models.IntegerField(primary_key=True, db_column='divisionID') # Field name made lowercase.
    size = models.IntegerField(null=True, blank=True)
    class Meta:
        db_table = u'crpNPCCorporationDivisions'
class Crpnpccorporationresearchfields(models.Model):
    """Research fields offered per NPC corporation (``crpNPCCorporationResearchFields``)."""
    # NOTE(review): two primary_key=True fields; Django does not support
    # composite primary keys -- confirm the intended key column.
    skillid = models.IntegerField(primary_key=True, db_column='skillID') # Field name made lowercase.
    corporationid = models.IntegerField(primary_key=True, db_column='corporationID') # Field name made lowercase.
    class Meta:
        db_table = u'crpNPCCorporationResearchFields'
class Crpnpccorporationtrades(models.Model):
    """Types traded by NPC corporations (``crpNPCCorporationTrades``)."""
    # NOTE(review): two primary_key=True fields; Django does not support
    # composite primary keys -- confirm the intended key column.
    corporationid = models.IntegerField(primary_key=True, db_column='corporationID') # Field name made lowercase.
    typeid = models.IntegerField(primary_key=True, db_column='typeID') # Field name made lowercase.
    class Meta:
        db_table = u'crpNPCCorporationTrades'
class Crpnpccorporations(models.Model):
    """NPC corporations with investors, standing and size data (``crpNPCCorporations``)."""
    corporationid = models.IntegerField(primary_key=True, db_column='corporationID') # Field name made lowercase.
    size = models.CharField(max_length=3, blank=True)
    extent = models.CharField(max_length=3, blank=True)
    solarsystemid = models.IntegerField(null=True, db_column='solarSystemID', blank=True) # Field name made lowercase.
    investorid1 = models.IntegerField(null=True, db_column='investorID1', blank=True) # Field name made lowercase.
    investorshares1 = models.IntegerField(null=True, db_column='investorShares1', blank=True) # Field name made lowercase.
    investorid2 = models.IntegerField(null=True, db_column='investorID2', blank=True) # Field name made lowercase.
    investorshares2 = models.IntegerField(null=True, db_column='investorShares2', blank=True) # Field name made lowercase.
    investorid3 = models.IntegerField(null=True, db_column='investorID3', blank=True) # Field name made lowercase.
    investorshares3 = models.IntegerField(null=True, db_column='investorShares3', blank=True) # Field name made lowercase.
    investorid4 = models.IntegerField(null=True, db_column='investorID4', blank=True) # Field name made lowercase.
    investorshares4 = models.IntegerField(null=True, db_column='investorShares4', blank=True) # Field name made lowercase.
    friendid = models.IntegerField(null=True, db_column='friendID', blank=True) # Field name made lowercase.
    enemyid = models.IntegerField(null=True, db_column='enemyID', blank=True) # Field name made lowercase.
    publicshares = models.BigIntegerField(null=True, db_column='publicShares', blank=True) # Field name made lowercase.
    initialprice = models.IntegerField(null=True, db_column='initialPrice', blank=True) # Field name made lowercase.
    minsecurity = models.FloatField(null=True, db_column='minSecurity', blank=True) # Field name made lowercase.
    scattered = models.IntegerField(null=True, blank=True)
    fringe = models.IntegerField(null=True, blank=True)
    corridor = models.IntegerField(null=True, blank=True)
    hub = models.IntegerField(null=True, blank=True)
    border = models.IntegerField(null=True, blank=True)
    factionid = models.IntegerField(null=True, db_column='factionID', blank=True) # Field name made lowercase.
    sizefactor = models.FloatField(null=True, db_column='sizeFactor', blank=True) # Field name made lowercase.
    stationcount = models.IntegerField(null=True, db_column='stationCount', blank=True) # Field name made lowercase.
    stationsystemcount = models.IntegerField(null=True, db_column='stationSystemCount', blank=True) # Field name made lowercase.
    description = models.CharField(max_length=12000, blank=True)
    iconid = models.IntegerField(null=True, db_column='iconID', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'crpNPCCorporations'
class Crpnpcdivisions(models.Model):
    """NPC corporation divisions (``crpNPCDivisions``)."""
    divisionid = models.IntegerField(primary_key=True, db_column='divisionID') # Field name made lowercase.
    divisionname = models.CharField(max_length=300, db_column='divisionName', blank=True) # Field name made lowercase.
    description = models.CharField(max_length=3000, blank=True)
    leadertype = models.CharField(max_length=300, db_column='leaderType', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'crpNPCDivisions'
class Crtcategories(models.Model):
    """Certificate categories (``crtCategories``)."""
    categoryid = models.IntegerField(primary_key=True, db_column='categoryID') # Field name made lowercase.
    description = models.CharField(max_length=1500, blank=True)
    categoryname = models.CharField(max_length=768, db_column='categoryName', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'crtCategories'
class Crtcertificates(models.Model):
    """Certificates (``crtCertificates``)."""
    certificateid = models.IntegerField(primary_key=True, db_column='certificateID') # Field name made lowercase.
    categoryid = models.IntegerField(null=True, db_column='categoryID', blank=True) # Field name made lowercase.
    classid = models.IntegerField(null=True, db_column='classID', blank=True) # Field name made lowercase.
    grade = models.IntegerField(null=True, blank=True)
    corpid = models.IntegerField(null=True, db_column='corpID', blank=True) # Field name made lowercase.
    iconid = models.IntegerField(null=True, db_column='iconID', blank=True) # Field name made lowercase.
    description = models.CharField(max_length=1500, blank=True)
    class Meta:
        db_table = u'crtCertificates'
class Crtclasses(models.Model):
    """Certificate classes (``crtClasses``)."""
    classid = models.IntegerField(primary_key=True, db_column='classID') # Field name made lowercase.
    description = models.CharField(max_length=1500, blank=True)
    classname = models.CharField(max_length=768, db_column='className', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'crtClasses'
class Crtrecommendations(models.Model):
    """Certificate recommendations per ship type (``crtRecommendations``)."""
    recommendationid = models.IntegerField(primary_key=True, db_column='recommendationID') # Field name made lowercase.
    shiptypeid = models.IntegerField(null=True, db_column='shipTypeID', blank=True) # Field name made lowercase.
    certificateid = models.IntegerField(null=True, db_column='certificateID', blank=True) # Field name made lowercase.
    recommendationlevel = models.IntegerField(db_column='recommendationLevel') # Field name made lowercase.
    class Meta:
        db_table = u'crtRecommendations'
class Crtrelationships(models.Model):
    """Certificate prerequisite relationships (``crtRelationships``)."""
    relationshipid = models.IntegerField(primary_key=True, db_column='relationshipID') # Field name made lowercase.
    parentid = models.IntegerField(null=True, db_column='parentID', blank=True) # Field name made lowercase.
    parenttypeid = models.IntegerField(null=True, db_column='parentTypeID', blank=True) # Field name made lowercase.
    parentlevel = models.IntegerField(null=True, db_column='parentLevel', blank=True) # Field name made lowercase.
    childid = models.IntegerField(null=True, db_column='childID', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'crtRelationships'
class Dgmattributecategories(models.Model):
    """Dogma attribute categories (``dgmAttributeCategories``)."""
    categoryid = models.IntegerField(primary_key=True, db_column='categoryID') # Field name made lowercase.
    categoryname = models.CharField(max_length=150, db_column='categoryName', blank=True) # Field name made lowercase.
    categorydescription = models.CharField(max_length=600, db_column='categoryDescription', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'dgmAttributeCategories'
class Dgmattributetypes(models.Model):
    """Dogma attribute type definitions (``dgmAttributeTypes``)."""
    attributeid = models.IntegerField(primary_key=True, db_column='attributeID') # Field name made lowercase.
    attributename = models.CharField(max_length=300, db_column='attributeName', blank=True) # Field name made lowercase.
    description = models.CharField(max_length=3000, blank=True)
    iconid = models.IntegerField(null=True, db_column='iconID', blank=True) # Field name made lowercase.
    defaultvalue = models.FloatField(null=True, db_column='defaultValue', blank=True) # Field name made lowercase.
    published = models.IntegerField(null=True, blank=True)
    displayname = models.CharField(max_length=300, db_column='displayName', blank=True) # Field name made lowercase.
    unitid = models.IntegerField(null=True, db_column='unitID', blank=True) # Field name made lowercase.
    stackable = models.IntegerField(null=True, blank=True)
    highisgood = models.IntegerField(null=True, db_column='highIsGood', blank=True) # Field name made lowercase.
    categoryid = models.IntegerField(null=True, db_column='categoryID', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'dgmAttributeTypes'
class Dgmeffects(models.Model):
    """Dogma effect definitions (``dgmEffects``)."""
    effectid = models.IntegerField(primary_key=True, db_column='effectID') # Field name made lowercase.
    effectname = models.CharField(max_length=1200, db_column='effectName', blank=True) # Field name made lowercase.
    effectcategory = models.IntegerField(null=True, db_column='effectCategory', blank=True) # Field name made lowercase.
    preexpression = models.IntegerField(null=True, db_column='preExpression', blank=True) # Field name made lowercase.
    postexpression = models.IntegerField(null=True, db_column='postExpression', blank=True) # Field name made lowercase.
    description = models.CharField(max_length=3000, blank=True)
    guid = models.CharField(max_length=180, blank=True)
    iconid = models.IntegerField(null=True, db_column='iconID', blank=True) # Field name made lowercase.
    isoffensive = models.IntegerField(null=True, db_column='isOffensive', blank=True) # Field name made lowercase.
    isassistance = models.IntegerField(null=True, db_column='isAssistance', blank=True) # Field name made lowercase.
    durationattributeid = models.IntegerField(null=True, db_column='durationAttributeID', blank=True) # Field name made lowercase.
    trackingspeedattributeid = models.IntegerField(null=True, db_column='trackingSpeedAttributeID', blank=True) # Field name made lowercase.
    dischargeattributeid = models.IntegerField(null=True, db_column='dischargeAttributeID', blank=True) # Field name made lowercase.
    rangeattributeid = models.IntegerField(null=True, db_column='rangeAttributeID', blank=True) # Field name made lowercase.
    falloffattributeid = models.IntegerField(null=True, db_column='falloffAttributeID', blank=True) # Field name made lowercase.
    disallowautorepeat = models.IntegerField(null=True, db_column='disallowAutoRepeat', blank=True) # Field name made lowercase.
    published = models.IntegerField(null=True, blank=True)
    displayname = models.CharField(max_length=300, db_column='displayName', blank=True) # Field name made lowercase.
    iswarpsafe = models.IntegerField(null=True, db_column='isWarpSafe', blank=True) # Field name made lowercase.
    rangechance = models.IntegerField(null=True, db_column='rangeChance', blank=True) # Field name made lowercase.
    electronicchance = models.IntegerField(null=True, db_column='electronicChance', blank=True) # Field name made lowercase.
    propulsionchance = models.IntegerField(null=True, db_column='propulsionChance', blank=True) # Field name made lowercase.
    distribution = models.IntegerField(null=True, blank=True)
    sfxname = models.CharField(max_length=60, db_column='sfxName', blank=True) # Field name made lowercase.
    npcusagechanceattributeid = models.IntegerField(null=True, db_column='npcUsageChanceAttributeID', blank=True) # Field name made lowercase.
    npcactivationchanceattributeid = models.IntegerField(null=True, db_column='npcActivationChanceAttributeID', blank=True) # Field name made lowercase.
    fittingusagechanceattributeid = models.IntegerField(null=True, db_column='fittingUsageChanceAttributeID', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'dgmEffects'
class Dgmtypeattributes(models.Model):
    """Attribute values per item type (``dgmTypeAttributes``)."""
    # NOTE(review): two primary_key=True fields; Django does not support
    # composite primary keys -- confirm the intended key column.
    typeid = models.IntegerField(primary_key=True, db_column='typeID') # Field name made lowercase.
    attributeid = models.IntegerField(primary_key=True, db_column='attributeID') # Field name made lowercase.
    valueint = models.IntegerField(null=True, db_column='valueInt', blank=True) # Field name made lowercase.
    valuefloat = models.FloatField(null=True, db_column='valueFloat', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'dgmTypeAttributes'
class Dgmtypeeffects(models.Model):
    """Effects attached to item types (``dgmTypeEffects``)."""
    # NOTE(review): two primary_key=True fields; Django does not support
    # composite primary keys -- confirm the intended key column.
    typeid = models.IntegerField(primary_key=True, db_column='typeID') # Field name made lowercase.
    effectid = models.IntegerField(primary_key=True, db_column='effectID') # Field name made lowercase.
    isdefault = models.IntegerField(null=True, db_column='isDefault', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'dgmTypeEffects'
class Evegraphics(models.Model):
    """Graphic asset records (``eveGraphics``)."""
    graphicid = models.IntegerField(primary_key=True, db_column='graphicID') # Field name made lowercase.
    graphicfile = models.CharField(max_length=1500, db_column='graphicFile') # Field name made lowercase.
    description = models.TextField(blank=True)
    obsolete = models.IntegerField(null=True, blank=True)
    graphictype = models.CharField(max_length=300, db_column='graphicType', blank=True) # Field name made lowercase.
    collidable = models.IntegerField(null=True, blank=True)
    explosionid = models.IntegerField(null=True, db_column='explosionID', blank=True) # Field name made lowercase.
    directoryid = models.IntegerField(null=True, db_column='directoryID', blank=True) # Field name made lowercase.
    graphicname = models.TextField(db_column='graphicName', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'eveGraphics'
class Eveicons(models.Model):
    """Icon asset records (``eveIcons``)."""
    iconid = models.IntegerField(primary_key=True, db_column='iconID') # Field name made lowercase.
    iconfile = models.CharField(max_length=1500, db_column='iconFile') # Field name made lowercase.
    description = models.TextField(blank=True)
    class Meta:
        db_table = u'eveIcons'
class Eveunits(models.Model):
    """Measurement units used by attributes (``eveUnits``)."""
    unitid = models.IntegerField(primary_key=True, db_column='unitID') # Field name made lowercase.
    unitname = models.CharField(max_length=300, db_column='unitName', blank=True) # Field name made lowercase.
    displayname = models.CharField(max_length=150, db_column='displayName', blank=True) # Field name made lowercase.
    description = models.CharField(max_length=3000, blank=True)
    class Meta:
        db_table = u'eveUnits'
class Invblueprinttypes(models.Model):
    """Blueprint type data: production and research times (``invBlueprintTypes``)."""
    blueprinttypeid = models.IntegerField(primary_key=True, db_column='blueprintTypeID') # Field name made lowercase.
    parentblueprinttypeid = models.IntegerField(null=True, db_column='parentBlueprintTypeID', blank=True) # Field name made lowercase.
    producttypeid = models.IntegerField(null=True, db_column='productTypeID', blank=True) # Field name made lowercase.
    productiontime = models.IntegerField(null=True, db_column='productionTime', blank=True) # Field name made lowercase.
    techlevel = models.IntegerField(null=True, db_column='techLevel', blank=True) # Field name made lowercase.
    researchproductivitytime = models.IntegerField(null=True, db_column='researchProductivityTime', blank=True) # Field name made lowercase.
    researchmaterialtime = models.IntegerField(null=True, db_column='researchMaterialTime', blank=True) # Field name made lowercase.
    researchcopytime = models.IntegerField(null=True, db_column='researchCopyTime', blank=True) # Field name made lowercase.
    researchtechtime = models.IntegerField(null=True, db_column='researchTechTime', blank=True) # Field name made lowercase.
    productivitymodifier = models.IntegerField(null=True, db_column='productivityModifier', blank=True) # Field name made lowercase.
    materialmodifier = models.IntegerField(null=True, db_column='materialModifier', blank=True) # Field name made lowercase.
    wastefactor = models.IntegerField(null=True, db_column='wasteFactor', blank=True) # Field name made lowercase.
    maxproductionlimit = models.IntegerField(null=True, db_column='maxProductionLimit', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'invBlueprintTypes'
class Invcategories(models.Model):
    """Inventory item categories (``invCategories``)."""
    categoryid = models.IntegerField(primary_key=True, db_column='categoryID') # Field name made lowercase.
    categoryname = models.CharField(max_length=300, db_column='categoryName', blank=True) # Field name made lowercase.
    description = models.CharField(max_length=9000, blank=True)
    iconid = models.IntegerField(null=True, db_column='iconID', blank=True) # Field name made lowercase.
    published = models.IntegerField(null=True, blank=True)
    class Meta:
        db_table = u'invCategories'
class Invcontrabandtypes(models.Model):
    """Contraband item types per faction (``invContrabandTypes``)."""
    factionid = models.IntegerField(primary_key=True, db_column='factionID') # Field name made lowercase.
    typeid = models.IntegerField(db_column='typeID') # Field name made lowercase.
    standingloss = models.FloatField(null=True, db_column='standingLoss', blank=True) # Field name made lowercase.
    confiscateminsec = models.FloatField(null=True, db_column='confiscateMinSec', blank=True) # Field name made lowercase.
    finebyvalue = models.FloatField(null=True, db_column='fineByValue', blank=True) # Field name made lowercase.
    attackminsec = models.FloatField(null=True, db_column='attackMinSec', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'invContrabandTypes'
class Invcontroltowerresourcepurposes(models.Model):
    """Control tower resource purpose lookup (``invControlTowerResourcePurposes``)."""
    purpose = models.IntegerField(primary_key=True)
    purposetext = models.CharField(max_length=300, db_column='purposeText', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'invControlTowerResourcePurposes'
class Invcontroltowerresources(models.Model):
    """Fuel/resource requirements per control tower type (``invControlTowerResources``)."""
    # NOTE(review): two primary_key=True fields; Django does not support
    # composite primary keys -- confirm the intended key column.
    controltowertypeid = models.IntegerField(primary_key=True, db_column='controlTowerTypeID') # Field name made lowercase.
    resourcetypeid = models.IntegerField(primary_key=True, db_column='resourceTypeID') # Field name made lowercase.
    purpose = models.IntegerField(null=True, blank=True)
    quantity = models.IntegerField(null=True, blank=True)
    minsecuritylevel = models.FloatField(null=True, db_column='minSecurityLevel', blank=True) # Field name made lowercase.
    factionid = models.IntegerField(null=True, db_column='factionID', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'invControlTowerResources'
class Invflags(models.Model):
    """Inventory flag lookup (``invFlags``)."""
    flagid = models.IntegerField(primary_key=True, db_column='flagID') # Field name made lowercase.
    flagname = models.CharField(max_length=600, db_column='flagName', blank=True) # Field name made lowercase.
    flagtext = models.CharField(max_length=300, db_column='flagText', blank=True) # Field name made lowercase.
    orderid = models.IntegerField(null=True, db_column='orderID', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'invFlags'
class Invgroups(models.Model):
    """Inventory item groups (``invGroups``)."""
    groupid = models.IntegerField(primary_key=True, db_column='groupID') # Field name made lowercase.
    categoryid = models.IntegerField(null=True, db_column='categoryID', blank=True) # Field name made lowercase.
    groupname = models.CharField(max_length=300, db_column='groupName', blank=True) # Field name made lowercase.
    description = models.CharField(max_length=9000, blank=True)
    iconid = models.IntegerField(null=True, db_column='iconID', blank=True) # Field name made lowercase.
    usebaseprice = models.IntegerField(null=True, db_column='useBasePrice', blank=True) # Field name made lowercase.
    allowmanufacture = models.IntegerField(null=True, db_column='allowManufacture', blank=True) # Field name made lowercase.
    allowrecycler = models.IntegerField(null=True, db_column='allowRecycler', blank=True) # Field name made lowercase.
    anchored = models.IntegerField(null=True, blank=True)
    anchorable = models.IntegerField(null=True, blank=True)
    fittablenonsingleton = models.IntegerField(null=True, db_column='fittableNonSingleton', blank=True) # Field name made lowercase.
    published = models.IntegerField(null=True, blank=True)
    class Meta:
        db_table = u'invGroups'
class Invitems(models.Model):
    """Static in-game item instances (``invItems``)."""
    itemid = models.BigIntegerField(primary_key=True, db_column='itemID') # Field name made lowercase.
    typeid = models.IntegerField(db_column='typeID') # Field name made lowercase.
    ownerid = models.IntegerField(db_column='ownerID') # Field name made lowercase.
    locationid = models.BigIntegerField(db_column='locationID') # Field name made lowercase.
    flagid = models.IntegerField(db_column='flagID') # Field name made lowercase.
    quantity = models.IntegerField()
    class Meta:
        db_table = u'invItems'
class Invmarketgroups(models.Model):
    """Market browse-tree groups (``invMarketGroups``)."""
    marketgroupid = models.IntegerField(primary_key=True, db_column='marketGroupID') # Field name made lowercase.
    parentgroupid = models.IntegerField(null=True, db_column='parentGroupID', blank=True) # Field name made lowercase.
    marketgroupname = models.CharField(max_length=300, db_column='marketGroupName', blank=True) # Field name made lowercase.
    description = models.CharField(max_length=9000, blank=True)
    iconid = models.IntegerField(null=True, db_column='iconID', blank=True) # Field name made lowercase.
    hastypes = models.IntegerField(null=True, db_column='hasTypes', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'invMarketGroups'
class Invmetagroups(models.Model):
    """Meta groups (tech/meta level groupings) (``invMetaGroups``)."""
    metagroupid = models.IntegerField(primary_key=True, db_column='metaGroupID') # Field name made lowercase.
    metagroupname = models.CharField(max_length=300, db_column='metaGroupName', blank=True) # Field name made lowercase.
    description = models.CharField(max_length=3000, blank=True)
    iconid = models.IntegerField(null=True, db_column='iconID', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'invMetaGroups'
class Invmetatypes(models.Model):
    """Meta-type links between item variants (``invMetaTypes``)."""
    typeid = models.IntegerField(primary_key=True, db_column='typeID') # Field name made lowercase.
    parenttypeid = models.IntegerField(null=True, db_column='parentTypeID', blank=True) # Field name made lowercase.
    metagroupid = models.IntegerField(null=True, db_column='metaGroupID', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'invMetaTypes'
class Invnames(models.Model):
    """Item ID to display name mapping (``invNames``)."""
    itemid = models.BigIntegerField(primary_key=True, db_column='itemID') # Field name made lowercase.
    itemname = models.CharField(max_length=600, db_column='itemName') # Field name made lowercase.
    class Meta:
        db_table = u'invNames'
class Invpositions(models.Model):
    """Item 3D position and orientation (``invPositions``)."""
    itemid = models.BigIntegerField(primary_key=True, db_column='itemID') # Field name made lowercase.
    x = models.FloatField()
    y = models.FloatField()
    z = models.FloatField()
    yaw = models.FloatField(null=True, blank=True)
    pitch = models.FloatField(null=True, blank=True)
    roll = models.FloatField(null=True, blank=True)
    class Meta:
        db_table = u'invPositions'
class Invtypematerials(models.Model):
    """ORM mapping for the ``invTypeMaterials`` database table."""
    # NOTE(review): two primary_key=True fields — Django supports a single PK per
    # model; this looks like an inspectdb rendering of a composite key. TODO:
    # confirm and convert to one PK plus Meta.unique_together.
    typeid = models.IntegerField(primary_key=True, db_column='typeID') # Field name made lowercase.
    materialtypeid = models.IntegerField(primary_key=True, db_column='materialTypeID') # Field name made lowercase.
    quantity = models.IntegerField()
    class Meta:
        db_table = u'invTypeMaterials'
class Invtypereactions(models.Model):
    """ORM mapping for the ``invTypeReactions`` database table."""
    # NOTE(review): three primary_key=True fields — Django supports a single PK
    # per model; likely a composite key from inspectdb. TODO: confirm and use
    # one PK plus Meta.unique_together.
    reactiontypeid = models.IntegerField(primary_key=True, db_column='reactionTypeID') # Field name made lowercase.
    input = models.IntegerField(primary_key=True)
    typeid = models.IntegerField(primary_key=True, db_column='typeID') # Field name made lowercase.
    quantity = models.IntegerField(null=True, blank=True)
    class Meta:
        db_table = u'invTypeReactions'
class Invtypes(models.Model):
    """ORM mapping for the ``invTypes`` database table (item type definitions)."""
    typeid = models.IntegerField(primary_key=True, db_column='typeID') # Field name made lowercase.
    groupid = models.IntegerField(null=True, db_column='groupID', blank=True) # Field name made lowercase.
    typename = models.CharField(max_length=300, db_column='typeName', blank=True) # Field name made lowercase.
    description = models.CharField(max_length=9000, blank=True)
    radius = models.FloatField(null=True, blank=True)
    mass = models.FloatField(null=True, blank=True)
    volume = models.FloatField(null=True, blank=True)
    capacity = models.FloatField(null=True, blank=True)
    portionsize = models.IntegerField(null=True, db_column='portionSize', blank=True) # Field name made lowercase.
    raceid = models.IntegerField(null=True, db_column='raceID', blank=True) # Field name made lowercase.
    baseprice = models.DecimalField(decimal_places=4, null=True, max_digits=21, db_column='basePrice', blank=True) # Field name made lowercase.
    published = models.IntegerField(null=True, blank=True)
    marketgroupid = models.IntegerField(null=True, db_column='marketGroupID', blank=True) # Field name made lowercase.
    chanceofduplicating = models.FloatField(null=True, db_column='chanceOfDuplicating', blank=True) # Field name made lowercase.
    iconid = models.IntegerField(null=True, db_column='iconID', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'invTypes'
class Invuniquenames(models.Model):
    """ORM mapping for the ``invUniqueNames`` database table."""
    itemid = models.IntegerField(primary_key=True, db_column='itemID') # Field name made lowercase.
    itemname = models.CharField(max_length=600, db_column='itemName') # Field name made lowercase.
    groupid = models.IntegerField(null=True, db_column='groupID', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'invUniqueNames'
class Mapcelestialstatistics(models.Model):
    """ORM mapping for the ``mapCelestialStatistics`` database table."""
    celestialid = models.IntegerField(primary_key=True, db_column='celestialID') # Field name made lowercase.
    temperature = models.FloatField(null=True, blank=True)
    spectralclass = models.CharField(max_length=30, db_column='spectralClass', blank=True) # Field name made lowercase.
    luminosity = models.FloatField(null=True, blank=True)
    age = models.FloatField(null=True, blank=True)
    life = models.FloatField(null=True, blank=True)
    orbitradius = models.FloatField(null=True, db_column='orbitRadius', blank=True) # Field name made lowercase.
    eccentricity = models.FloatField(null=True, blank=True)
    massdust = models.FloatField(null=True, db_column='massDust', blank=True) # Field name made lowercase.
    massgas = models.FloatField(null=True, db_column='massGas', blank=True) # Field name made lowercase.
    fragmented = models.IntegerField(null=True, blank=True)
    density = models.FloatField(null=True, blank=True)
    surfacegravity = models.FloatField(null=True, db_column='surfaceGravity', blank=True) # Field name made lowercase.
    escapevelocity = models.FloatField(null=True, db_column='escapeVelocity', blank=True) # Field name made lowercase.
    orbitperiod = models.FloatField(null=True, db_column='orbitPeriod', blank=True) # Field name made lowercase.
    rotationrate = models.FloatField(null=True, db_column='rotationRate', blank=True) # Field name made lowercase.
    locked = models.IntegerField(null=True, blank=True)
    pressure = models.FloatField(null=True, blank=True)
    radius = models.FloatField(null=True, blank=True)
    mass = models.FloatField(null=True, blank=True)
    class Meta:
        db_table = u'mapCelestialStatistics'
class Mapconstellationjumps(models.Model):
    """ORM mapping for the ``mapConstellationJumps`` database table."""
    # NOTE(review): two primary_key=True fields — Django supports a single PK per
    # model; likely a composite key from inspectdb. TODO: confirm.
    fromregionid = models.IntegerField(null=True, db_column='fromRegionID', blank=True) # Field name made lowercase.
    fromconstellationid = models.IntegerField(primary_key=True, db_column='fromConstellationID') # Field name made lowercase.
    toconstellationid = models.IntegerField(primary_key=True, db_column='toConstellationID') # Field name made lowercase.
    toregionid = models.IntegerField(null=True, db_column='toRegionID', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'mapConstellationJumps'
class Mapconstellations(models.Model):
    """ORM mapping for the ``mapConstellations`` database table."""
    regionid = models.IntegerField(null=True, db_column='regionID', blank=True) # Field name made lowercase.
    constellationid = models.IntegerField(primary_key=True, db_column='constellationID') # Field name made lowercase.
    constellationname = models.CharField(max_length=300, db_column='constellationName', blank=True) # Field name made lowercase.
    x = models.FloatField(null=True, blank=True)
    y = models.FloatField(null=True, blank=True)
    z = models.FloatField(null=True, blank=True)
    xmin = models.FloatField(null=True, db_column='xMin', blank=True) # Field name made lowercase.
    xmax = models.FloatField(null=True, db_column='xMax', blank=True) # Field name made lowercase.
    ymin = models.FloatField(null=True, db_column='yMin', blank=True) # Field name made lowercase.
    ymax = models.FloatField(null=True, db_column='yMax', blank=True) # Field name made lowercase.
    zmin = models.FloatField(null=True, db_column='zMin', blank=True) # Field name made lowercase.
    zmax = models.FloatField(null=True, db_column='zMax', blank=True) # Field name made lowercase.
    factionid = models.IntegerField(null=True, db_column='factionID', blank=True) # Field name made lowercase.
    radius = models.FloatField(null=True, blank=True)
    class Meta:
        db_table = u'mapConstellations'
class Mapdenormalize(models.Model):
    """ORM mapping for the ``mapDenormalize`` database table."""
    itemid = models.IntegerField(primary_key=True, db_column='itemID') # Field name made lowercase.
    typeid = models.IntegerField(null=True, db_column='typeID', blank=True) # Field name made lowercase.
    groupid = models.IntegerField(null=True, db_column='groupID', blank=True) # Field name made lowercase.
    solarsystemid = models.IntegerField(null=True, db_column='solarSystemID', blank=True) # Field name made lowercase.
    constellationid = models.IntegerField(null=True, db_column='constellationID', blank=True) # Field name made lowercase.
    regionid = models.IntegerField(null=True, db_column='regionID', blank=True) # Field name made lowercase.
    orbitid = models.IntegerField(null=True, db_column='orbitID', blank=True) # Field name made lowercase.
    x = models.FloatField(null=True, blank=True)
    y = models.FloatField(null=True, blank=True)
    z = models.FloatField(null=True, blank=True)
    radius = models.FloatField(null=True, blank=True)
    itemname = models.CharField(max_length=300, db_column='itemName', blank=True) # Field name made lowercase.
    security = models.FloatField(null=True, blank=True)
    celestialindex = models.IntegerField(null=True, db_column='celestialIndex', blank=True) # Field name made lowercase.
    orbitindex = models.IntegerField(null=True, db_column='orbitIndex', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'mapDenormalize'
class Mapjumps(models.Model):
    """ORM mapping for the ``mapJumps`` database table."""
    stargateid = models.IntegerField(primary_key=True, db_column='stargateID') # Field name made lowercase.
    celestialid = models.IntegerField(null=True, db_column='celestialID', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'mapJumps'
class Maplandmarks(models.Model):
    """ORM mapping for the ``mapLandmarks`` database table."""
    landmarkid = models.IntegerField(primary_key=True, db_column='landmarkID') # Field name made lowercase.
    landmarkname = models.CharField(max_length=300, db_column='landmarkName', blank=True) # Field name made lowercase.
    description = models.CharField(max_length=21000, blank=True)
    locationid = models.IntegerField(null=True, db_column='locationID', blank=True) # Field name made lowercase.
    x = models.FloatField(null=True, blank=True)
    y = models.FloatField(null=True, blank=True)
    z = models.FloatField(null=True, blank=True)
    radius = models.FloatField(null=True, blank=True)
    iconid = models.IntegerField(null=True, db_column='iconID', blank=True) # Field name made lowercase.
    importance = models.IntegerField(null=True, blank=True)
    class Meta:
        db_table = u'mapLandmarks'
class Maplocationscenes(models.Model):
    """ORM mapping for the ``mapLocationScenes`` database table."""
    locationid = models.IntegerField(primary_key=True, db_column='locationID') # Field name made lowercase.
    graphicid = models.IntegerField(null=True, db_column='graphicID', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'mapLocationScenes'
class Maplocationwormholeclasses(models.Model):
    """ORM mapping for the ``mapLocationWormholeClasses`` database table."""
    locationid = models.IntegerField(primary_key=True, db_column='locationID') # Field name made lowercase.
    wormholeclassid = models.IntegerField(null=True, db_column='wormholeClassID', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'mapLocationWormholeClasses'
class Mapregionjumps(models.Model):
    """ORM mapping for the ``mapRegionJumps`` database table."""
    # NOTE(review): two primary_key=True fields — Django supports a single PK per
    # model; likely a composite key from inspectdb. TODO: confirm.
    fromregionid = models.IntegerField(primary_key=True, db_column='fromRegionID') # Field name made lowercase.
    toregionid = models.IntegerField(primary_key=True, db_column='toRegionID') # Field name made lowercase.
    class Meta:
        db_table = u'mapRegionJumps'
class Mapregions(models.Model):
    """ORM mapping for the ``mapRegions`` database table."""
    regionid = models.IntegerField(primary_key=True, db_column='regionID') # Field name made lowercase.
    regionname = models.CharField(max_length=300, db_column='regionName', blank=True) # Field name made lowercase.
    x = models.FloatField(null=True, blank=True)
    y = models.FloatField(null=True, blank=True)
    z = models.FloatField(null=True, blank=True)
    xmin = models.FloatField(null=True, db_column='xMin', blank=True) # Field name made lowercase.
    xmax = models.FloatField(null=True, db_column='xMax', blank=True) # Field name made lowercase.
    ymin = models.FloatField(null=True, db_column='yMin', blank=True) # Field name made lowercase.
    ymax = models.FloatField(null=True, db_column='yMax', blank=True) # Field name made lowercase.
    zmin = models.FloatField(null=True, db_column='zMin', blank=True) # Field name made lowercase.
    zmax = models.FloatField(null=True, db_column='zMax', blank=True) # Field name made lowercase.
    factionid = models.IntegerField(null=True, db_column='factionID', blank=True) # Field name made lowercase.
    radius = models.FloatField(null=True, blank=True)
    class Meta:
        db_table = u'mapRegions'
class Mapsolarsystemjumps(models.Model):
    """ORM mapping for the ``mapSolarSystemJumps`` database table."""
    # NOTE(review): two primary_key=True fields — Django supports a single PK per
    # model; likely a composite key from inspectdb. TODO: confirm.
    fromregionid = models.IntegerField(null=True, db_column='fromRegionID', blank=True) # Field name made lowercase.
    fromconstellationid = models.IntegerField(null=True, db_column='fromConstellationID', blank=True) # Field name made lowercase.
    fromsolarsystemid = models.IntegerField(primary_key=True, db_column='fromSolarSystemID') # Field name made lowercase.
    tosolarsystemid = models.IntegerField(primary_key=True, db_column='toSolarSystemID') # Field name made lowercase.
    toconstellationid = models.IntegerField(null=True, db_column='toConstellationID', blank=True) # Field name made lowercase.
    toregionid = models.IntegerField(null=True, db_column='toRegionID', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'mapSolarSystemJumps'
class Mapsolarsystems(models.Model):
    """ORM mapping for the ``mapSolarSystems`` database table."""
    regionid = models.IntegerField(null=True, db_column='regionID', blank=True) # Field name made lowercase.
    constellationid = models.IntegerField(null=True, db_column='constellationID', blank=True) # Field name made lowercase.
    solarsystemid = models.IntegerField(primary_key=True, db_column='solarSystemID') # Field name made lowercase.
    solarsystemname = models.CharField(max_length=300, db_column='solarSystemName', blank=True) # Field name made lowercase.
    x = models.FloatField(null=True, blank=True)
    y = models.FloatField(null=True, blank=True)
    z = models.FloatField(null=True, blank=True)
    xmin = models.FloatField(null=True, db_column='xMin', blank=True) # Field name made lowercase.
    xmax = models.FloatField(null=True, db_column='xMax', blank=True) # Field name made lowercase.
    ymin = models.FloatField(null=True, db_column='yMin', blank=True) # Field name made lowercase.
    ymax = models.FloatField(null=True, db_column='yMax', blank=True) # Field name made lowercase.
    zmin = models.FloatField(null=True, db_column='zMin', blank=True) # Field name made lowercase.
    zmax = models.FloatField(null=True, db_column='zMax', blank=True) # Field name made lowercase.
    luminosity = models.FloatField(null=True, blank=True)
    border = models.IntegerField(null=True, blank=True)
    fringe = models.IntegerField(null=True, blank=True)
    corridor = models.IntegerField(null=True, blank=True)
    hub = models.IntegerField(null=True, blank=True)
    international = models.IntegerField(null=True, blank=True)
    regional = models.IntegerField(null=True, blank=True)
    constellation = models.IntegerField(null=True, blank=True)
    security = models.FloatField(null=True, blank=True)
    factionid = models.IntegerField(null=True, db_column='factionID', blank=True) # Field name made lowercase.
    radius = models.FloatField(null=True, blank=True)
    suntypeid = models.IntegerField(null=True, db_column='sunTypeID', blank=True) # Field name made lowercase.
    securityclass = models.CharField(max_length=6, db_column='securityClass', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'mapSolarSystems'
class Mapuniverse(models.Model):
    """ORM mapping for the ``mapUniverse`` database table."""
    universeid = models.IntegerField(primary_key=True, db_column='universeID') # Field name made lowercase.
    universename = models.CharField(max_length=300, db_column='universeName', blank=True) # Field name made lowercase.
    x = models.FloatField(null=True, blank=True)
    y = models.FloatField(null=True, blank=True)
    z = models.FloatField(null=True, blank=True)
    xmin = models.FloatField(null=True, db_column='xMin', blank=True) # Field name made lowercase.
    xmax = models.FloatField(null=True, db_column='xMax', blank=True) # Field name made lowercase.
    ymin = models.FloatField(null=True, db_column='yMin', blank=True) # Field name made lowercase.
    ymax = models.FloatField(null=True, db_column='yMax', blank=True) # Field name made lowercase.
    zmin = models.FloatField(null=True, db_column='zMin', blank=True) # Field name made lowercase.
    zmax = models.FloatField(null=True, db_column='zMax', blank=True) # Field name made lowercase.
    radius = models.FloatField(null=True, blank=True)
    class Meta:
        db_table = u'mapUniverse'
class Planetschematics(models.Model):
    """ORM mapping for the ``planetSchematics`` database table."""
    schematicid = models.IntegerField(primary_key=True, db_column='schematicID') # Field name made lowercase.
    schematicname = models.CharField(max_length=765, db_column='schematicName', blank=True) # Field name made lowercase.
    cycletime = models.IntegerField(null=True, db_column='cycleTime', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'planetSchematics'
class Planetschematicspinmap(models.Model):
    """ORM mapping for the ``planetSchematicsPinMap`` database table."""
    # NOTE(review): two primary_key=True fields — Django supports a single PK per
    # model; likely a composite key from inspectdb. TODO: confirm.
    schematicid = models.IntegerField(primary_key=True, db_column='schematicID') # Field name made lowercase.
    pintypeid = models.IntegerField(primary_key=True, db_column='pinTypeID') # Field name made lowercase.
    class Meta:
        db_table = u'planetSchematicsPinMap'
class Planetschematicstypemap(models.Model):
    """ORM mapping for the ``planetSchematicsTypeMap`` database table."""
    # NOTE(review): two primary_key=True fields — Django supports a single PK per
    # model; likely a composite key from inspectdb. TODO: confirm.
    schematicid = models.IntegerField(primary_key=True, db_column='schematicID') # Field name made lowercase.
    typeid = models.IntegerField(primary_key=True, db_column='typeID') # Field name made lowercase.
    quantity = models.IntegerField(null=True, blank=True)
    isinput = models.IntegerField(null=True, db_column='isInput', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'planetSchematicsTypeMap'
class Ramactivities(models.Model):
    """ORM mapping for the ``ramActivities`` database table."""
    activityid = models.IntegerField(primary_key=True, db_column='activityID') # Field name made lowercase.
    activityname = models.CharField(max_length=300, db_column='activityName', blank=True) # Field name made lowercase.
    iconno = models.CharField(max_length=15, db_column='iconNo', blank=True) # Field name made lowercase.
    description = models.CharField(max_length=3000, blank=True)
    published = models.IntegerField(null=True, blank=True)
    class Meta:
        db_table = u'ramActivities'
class Ramassemblylinestations(models.Model):
    """ORM mapping for the ``ramAssemblyLineStations`` database table."""
    # NOTE(review): two primary_key=True fields — Django supports a single PK per
    # model; likely a composite key from inspectdb. TODO: confirm.
    stationid = models.IntegerField(primary_key=True, db_column='stationID') # Field name made lowercase.
    assemblylinetypeid = models.IntegerField(primary_key=True, db_column='assemblyLineTypeID') # Field name made lowercase.
    quantity = models.IntegerField(null=True, blank=True)
    stationtypeid = models.IntegerField(null=True, db_column='stationTypeID', blank=True) # Field name made lowercase.
    ownerid = models.IntegerField(null=True, db_column='ownerID', blank=True) # Field name made lowercase.
    solarsystemid = models.IntegerField(null=True, db_column='solarSystemID', blank=True) # Field name made lowercase.
    regionid = models.IntegerField(null=True, db_column='regionID', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'ramAssemblyLineStations'
class Ramassemblylinetypedetailpercategory(models.Model):
    """ORM mapping for the ``ramAssemblyLineTypeDetailPerCategory`` database table."""
    # NOTE(review): two primary_key=True fields — Django supports a single PK per
    # model; likely a composite key from inspectdb. TODO: confirm.
    assemblylinetypeid = models.IntegerField(primary_key=True, db_column='assemblyLineTypeID') # Field name made lowercase.
    categoryid = models.IntegerField(primary_key=True, db_column='categoryID') # Field name made lowercase.
    timemultiplier = models.FloatField(null=True, db_column='timeMultiplier', blank=True) # Field name made lowercase.
    materialmultiplier = models.FloatField(null=True, db_column='materialMultiplier', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'ramAssemblyLineTypeDetailPerCategory'
class Ramassemblylinetypedetailpergroup(models.Model):
    """ORM mapping for the ``ramAssemblyLineTypeDetailPerGroup`` database table."""
    # NOTE(review): two primary_key=True fields — Django supports a single PK per
    # model; likely a composite key from inspectdb. TODO: confirm.
    assemblylinetypeid = models.IntegerField(primary_key=True, db_column='assemblyLineTypeID') # Field name made lowercase.
    groupid = models.IntegerField(primary_key=True, db_column='groupID') # Field name made lowercase.
    timemultiplier = models.FloatField(null=True, db_column='timeMultiplier', blank=True) # Field name made lowercase.
    materialmultiplier = models.FloatField(null=True, db_column='materialMultiplier', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'ramAssemblyLineTypeDetailPerGroup'
class Ramassemblylinetypes(models.Model):
    """ORM mapping for the ``ramAssemblyLineTypes`` database table."""
    assemblylinetypeid = models.IntegerField(primary_key=True, db_column='assemblyLineTypeID') # Field name made lowercase.
    assemblylinetypename = models.CharField(max_length=300, db_column='assemblyLineTypeName', blank=True) # Field name made lowercase.
    description = models.CharField(max_length=3000, blank=True)
    basetimemultiplier = models.FloatField(null=True, db_column='baseTimeMultiplier', blank=True) # Field name made lowercase.
    basematerialmultiplier = models.FloatField(null=True, db_column='baseMaterialMultiplier', blank=True) # Field name made lowercase.
    volume = models.FloatField(null=True, blank=True)
    activityid = models.IntegerField(null=True, db_column='activityID', blank=True) # Field name made lowercase.
    mincostperhour = models.FloatField(null=True, db_column='minCostPerHour', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'ramAssemblyLineTypes'
class Ramassemblylines(models.Model):
    """ORM mapping for the ``ramAssemblyLines`` database table."""
    assemblylineid = models.IntegerField(primary_key=True, db_column='assemblyLineID') # Field name made lowercase.
    assemblylinetypeid = models.IntegerField(null=True, db_column='assemblyLineTypeID', blank=True) # Field name made lowercase.
    containerid = models.IntegerField(null=True, db_column='containerID', blank=True) # Field name made lowercase.
    nextfreetime = models.DateTimeField(null=True, db_column='nextFreeTime', blank=True) # Field name made lowercase.
    uigroupingid = models.IntegerField(null=True, db_column='UIGroupingID', blank=True) # Field name made lowercase.
    costinstall = models.FloatField(null=True, db_column='costInstall', blank=True) # Field name made lowercase.
    costperhour = models.FloatField(null=True, db_column='costPerHour', blank=True) # Field name made lowercase.
    restrictionmask = models.IntegerField(null=True, db_column='restrictionMask', blank=True) # Field name made lowercase.
    discountpergoodstandingpoint = models.FloatField(null=True, db_column='discountPerGoodStandingPoint', blank=True) # Field name made lowercase.
    surchargeperbadstandingpoint = models.FloatField(null=True, db_column='surchargePerBadStandingPoint', blank=True) # Field name made lowercase.
    minimumstanding = models.FloatField(null=True, db_column='minimumStanding', blank=True) # Field name made lowercase.
    minimumcharsecurity = models.FloatField(null=True, db_column='minimumCharSecurity', blank=True) # Field name made lowercase.
    minimumcorpsecurity = models.FloatField(null=True, db_column='minimumCorpSecurity', blank=True) # Field name made lowercase.
    maximumcharsecurity = models.FloatField(null=True, db_column='maximumCharSecurity', blank=True) # Field name made lowercase.
    maximumcorpsecurity = models.FloatField(null=True, db_column='maximumCorpSecurity', blank=True) # Field name made lowercase.
    ownerid = models.IntegerField(null=True, db_column='ownerID', blank=True) # Field name made lowercase.
    activityid = models.IntegerField(null=True, db_column='activityID', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'ramAssemblyLines'
class Raminstallationtypecontents(models.Model):
    """ORM mapping for the ``ramInstallationTypeContents`` database table."""
    # NOTE(review): two primary_key=True fields — Django supports a single PK per
    # model; likely a composite key from inspectdb. TODO: confirm.
    installationtypeid = models.IntegerField(primary_key=True, db_column='installationTypeID') # Field name made lowercase.
    assemblylinetypeid = models.IntegerField(primary_key=True, db_column='assemblyLineTypeID') # Field name made lowercase.
    quantity = models.IntegerField(null=True, blank=True)
    class Meta:
        db_table = u'ramInstallationTypeContents'
class Ramtyperequirements(models.Model):
    """ORM mapping for the ``ramTypeRequirements`` database table."""
    # NOTE(review): three primary_key=True fields — Django supports a single PK
    # per model; likely a composite key from inspectdb. TODO: confirm.
    typeid = models.IntegerField(primary_key=True, db_column='typeID') # Field name made lowercase.
    activityid = models.IntegerField(primary_key=True, db_column='activityID') # Field name made lowercase.
    requiredtypeid = models.IntegerField(primary_key=True, db_column='requiredTypeID') # Field name made lowercase.
    quantity = models.IntegerField(null=True, blank=True)
    damageperjob = models.FloatField(null=True, db_column='damagePerJob', blank=True) # Field name made lowercase.
    recycle = models.IntegerField(null=True, blank=True)
    class Meta:
        db_table = u'ramTypeRequirements'
class Staoperationservices(models.Model):
    """ORM mapping for the ``staOperationServices`` database table."""
    # NOTE(review): two primary_key=True fields — Django supports a single PK per
    # model; likely a composite key from inspectdb. TODO: confirm.
    operationid = models.IntegerField(primary_key=True, db_column='operationID') # Field name made lowercase.
    serviceid = models.IntegerField(primary_key=True, db_column='serviceID') # Field name made lowercase.
    class Meta:
        db_table = u'staOperationServices'
class Staoperations(models.Model):
    """ORM mapping for the ``staOperations`` database table."""
    activityid = models.IntegerField(null=True, db_column='activityID', blank=True) # Field name made lowercase.
    operationid = models.IntegerField(primary_key=True, db_column='operationID') # Field name made lowercase.
    operationname = models.CharField(max_length=300, db_column='operationName', blank=True) # Field name made lowercase.
    description = models.CharField(max_length=3000, blank=True)
    fringe = models.IntegerField(null=True, blank=True)
    corridor = models.IntegerField(null=True, blank=True)
    hub = models.IntegerField(null=True, blank=True)
    border = models.IntegerField(null=True, blank=True)
    ratio = models.IntegerField(null=True, blank=True)
    caldaristationtypeid = models.IntegerField(null=True, db_column='caldariStationTypeID', blank=True) # Field name made lowercase.
    minmatarstationtypeid = models.IntegerField(null=True, db_column='minmatarStationTypeID', blank=True) # Field name made lowercase.
    amarrstationtypeid = models.IntegerField(null=True, db_column='amarrStationTypeID', blank=True) # Field name made lowercase.
    gallentestationtypeid = models.IntegerField(null=True, db_column='gallenteStationTypeID', blank=True) # Field name made lowercase.
    jovestationtypeid = models.IntegerField(null=True, db_column='joveStationTypeID', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'staOperations'
class Staservices(models.Model):
    """ORM mapping for the ``staServices`` database table."""
    serviceid = models.IntegerField(primary_key=True, db_column='serviceID') # Field name made lowercase.
    servicename = models.CharField(max_length=300, db_column='serviceName', blank=True) # Field name made lowercase.
    description = models.CharField(max_length=3000, blank=True)
    class Meta:
        db_table = u'staServices'
class Stastationtypes(models.Model):
    """ORM mapping for the ``staStationTypes`` database table."""
    stationtypeid = models.IntegerField(primary_key=True, db_column='stationTypeID') # Field name made lowercase.
    dockentryx = models.FloatField(null=True, db_column='dockEntryX', blank=True) # Field name made lowercase.
    dockentryy = models.FloatField(null=True, db_column='dockEntryY', blank=True) # Field name made lowercase.
    dockentryz = models.FloatField(null=True, db_column='dockEntryZ', blank=True) # Field name made lowercase.
    dockorientationx = models.FloatField(null=True, db_column='dockOrientationX', blank=True) # Field name made lowercase.
    dockorientationy = models.FloatField(null=True, db_column='dockOrientationY', blank=True) # Field name made lowercase.
    dockorientationz = models.FloatField(null=True, db_column='dockOrientationZ', blank=True) # Field name made lowercase.
    operationid = models.IntegerField(null=True, db_column='operationID', blank=True) # Field name made lowercase.
    officeslots = models.IntegerField(null=True, db_column='officeSlots', blank=True) # Field name made lowercase.
    reprocessingefficiency = models.FloatField(null=True, db_column='reprocessingEfficiency', blank=True) # Field name made lowercase.
    conquerable = models.IntegerField(null=True, blank=True)
    class Meta:
        db_table = u'staStationTypes'
class Stastations(models.Model):
    """ORM mapping for the ``staStations`` database table."""
    stationid = models.IntegerField(primary_key=True, db_column='stationID') # Field name made lowercase.
    security = models.IntegerField(null=True, blank=True)
    dockingcostpervolume = models.FloatField(null=True, db_column='dockingCostPerVolume', blank=True) # Field name made lowercase.
    maxshipvolumedockable = models.FloatField(null=True, db_column='maxShipVolumeDockable', blank=True) # Field name made lowercase.
    officerentalcost = models.IntegerField(null=True, db_column='officeRentalCost', blank=True) # Field name made lowercase.
    operationid = models.IntegerField(null=True, db_column='operationID', blank=True) # Field name made lowercase.
    stationtypeid = models.IntegerField(null=True, db_column='stationTypeID', blank=True) # Field name made lowercase.
    corporationid = models.IntegerField(null=True, db_column='corporationID', blank=True) # Field name made lowercase.
    solarsystemid = models.IntegerField(null=True, db_column='solarSystemID', blank=True) # Field name made lowercase.
    constellationid = models.IntegerField(null=True, db_column='constellationID', blank=True) # Field name made lowercase.
    regionid = models.IntegerField(null=True, db_column='regionID', blank=True) # Field name made lowercase.
    stationname = models.CharField(max_length=300, db_column='stationName', blank=True) # Field name made lowercase.
    x = models.FloatField(null=True, blank=True)
    y = models.FloatField(null=True, blank=True)
    z = models.FloatField(null=True, blank=True)
    reprocessingefficiency = models.FloatField(null=True, db_column='reprocessingEfficiency', blank=True) # Field name made lowercase.
    reprocessingstationstake = models.FloatField(null=True, db_column='reprocessingStationsTake', blank=True) # Field name made lowercase.
    reprocessinghangarflag = models.IntegerField(null=True, db_column='reprocessingHangarFlag', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'staStations'
class Translationtables(models.Model):
    """ORM mapping for the ``translationTables`` database table."""
    # NOTE(review): two primary_key=True fields — Django supports a single PK per
    # model; likely a composite key from inspectdb. TODO: confirm.
    sourcetable = models.CharField(max_length=600, primary_key=True, db_column='sourceTable') # Field name made lowercase.
    destinationtable = models.CharField(max_length=600, db_column='destinationTable', blank=True) # Field name made lowercase.
    translatedkey = models.CharField(max_length=600, primary_key=True, db_column='translatedKey') # Field name made lowercase.
    tcgroupid = models.IntegerField(null=True, db_column='tcGroupID', blank=True) # Field name made lowercase.
    tcid = models.IntegerField(null=True, db_column='tcID', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'translationTables'
class Trntranslationcolumns(models.Model):
    """ORM mapping for the ``trnTranslationColumns`` database table."""
    tcgroupid = models.IntegerField(null=True, db_column='tcGroupID', blank=True) # Field name made lowercase.
    tcid = models.IntegerField(primary_key=True, db_column='tcID') # Field name made lowercase.
    tablename = models.CharField(max_length=768, db_column='tableName') # Field name made lowercase.
    columnname = models.CharField(max_length=384, db_column='columnName') # Field name made lowercase.
    masterid = models.CharField(max_length=384, db_column='masterID', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'trnTranslationColumns'
class Trntranslationlanguages(models.Model):
    """ORM mapping for the ``trnTranslationLanguages`` database table."""
    numericlanguageid = models.IntegerField(primary_key=True, db_column='numericLanguageID') # Field name made lowercase.
    languageid = models.CharField(max_length=150, db_column='languageID', blank=True) # Field name made lowercase.
    languagename = models.CharField(max_length=600, db_column='languageName', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'trnTranslationLanguages'
class Trntranslations(models.Model):
    """ORM mapping for the ``trnTranslations`` database table."""
    # NOTE(review): three primary_key=True fields — Django supports a single PK
    # per model; likely a composite key from inspectdb. TODO: confirm.
    tcid = models.IntegerField(primary_key=True, db_column='tcID') # Field name made lowercase.
    keyid = models.IntegerField(primary_key=True, db_column='keyID') # Field name made lowercase.
    languageid = models.CharField(max_length=150, primary_key=True, db_column='languageID') # Field name made lowercase.
    text = models.TextField(blank=True)
    class Meta:
        db_table = u'trnTranslations'
class Warcombatzonesystems(models.Model):
    """ORM mapping for the ``warCombatZoneSystems`` database table."""
    solarsystemid = models.IntegerField(primary_key=True, db_column='solarSystemID') # Field name made lowercase.
    combatzoneid = models.IntegerField(null=True, db_column='combatZoneID', blank=True) # Field name made lowercase.
    class Meta:
        db_table = u'warCombatZoneSystems'
class Warcombatzones(models.Model):
    """ORM mapping for the ``warCombatZones`` database table."""
    combatzoneid = models.IntegerField(primary_key=True, db_column='combatZoneID') # Field name made lowercase.
    combatzonename = models.CharField(max_length=300, db_column='combatZoneName', blank=True) # Field name made lowercase.
    factionid = models.IntegerField(null=True, db_column='factionID', blank=True) # Field name made lowercase.
    centersystemid = models.IntegerField(null=True, db_column='centerSystemID', blank=True) # Field name made lowercase.
    description = models.CharField(max_length=1500, blank=True)
    class Meta:
        db_table = u'warCombatZones'
|
st4lk/babel | refs/heads/master | babel/numbers.py | 2 | # -*- coding: utf-8 -*-
"""
babel.numbers
~~~~~~~~~~~~~
Locale dependent formatting and parsing of numeric data.
The default locale for the functions in this module is determined by the
following environment variables, in that order:
* ``LC_NUMERIC``,
* ``LC_ALL``, and
* ``LANG``
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
# TODO:
# Padding and rounding increments in pattern:
# - http://www.unicode.org/reports/tr35/ (Appendix G.6)
import re
from datetime import date as date_, datetime as datetime_
from babel.core import default_locale, Locale, get_global
from babel._compat import range_type, Decimal, InvalidOperation, ROUND_HALF_EVEN
LC_NUMERIC = default_locale('LC_NUMERIC')
def get_currency_name(currency, count=None, locale=LC_NUMERIC):
    """Return the name used by the locale for the specified currency.

    >>> get_currency_name('USD', locale='en_US')
    u'US Dollar'

    .. versionadded:: 0.9.4

    :param currency: the currency code
    :param count: the optional count.  If provided the currency name
                  will be pluralized to that number if possible.
    :param locale: the `Locale` object or locale identifier
    """
    loc = Locale.parse(locale)
    if count is not None:
        plural_form = loc.plural_form(count)
        plural_names = loc._data['currency_names_plural']
        if currency in plural_names:
            # Some locales only carry a subset of plural categories for a
            # currency; fall back to the generic name below instead of
            # raising KeyError when this category is missing.
            try:
                return plural_names[currency][plural_form]
            except KeyError:
                pass
    # Generic (non-pluralized) name; the code itself is the last resort.
    return loc.currencies.get(currency, currency)
def get_currency_symbol(currency, locale=LC_NUMERIC):
    """Return the symbol used by the locale for the specified currency.

    >>> get_currency_symbol('USD', locale='en_US')
    u'$'

    :param currency: the currency code
    :param locale: the `Locale` object or locale identifier
    """
    # Look up the locale's symbol table; an unknown code maps to itself.
    symbols = Locale.parse(locale).currency_symbols
    return symbols.get(currency, currency)
def get_territory_currencies(territory, start_date=None, end_date=None,
                             tender=True, non_tender=False,
                             include_details=False):
    """Returns the list of currencies for the given territory that are valid for
    the given date range. In addition to that the currency database
    distinguishes between tender and non-tender currencies. By default only
    tender currencies are returned.
    The return value is a list of all currencies roughly ordered by the time
    of when the currency became active. The longer the currency is being in
    use the more to the left of the list it will be.
    The start date defaults to today. If no end date is given it will be the
    same as the start date. Otherwise a range can be defined. For instance
    this can be used to find the currencies in use in Austria between 1995 and
    2011:
    >>> from datetime import date
    >>> get_territory_currencies('AT', date(1995, 1, 1), date(2011, 1, 1))
    ['ATS', 'EUR']
    Likewise it's also possible to find all the currencies in use on a
    single date:
    >>> get_territory_currencies('AT', date(1995, 1, 1))
    ['ATS']
    >>> get_territory_currencies('AT', date(2011, 1, 1))
    ['EUR']
    By default the return value only includes tender currencies. This
    however can be changed:
    >>> get_territory_currencies('US')
    ['USD']
    >>> get_territory_currencies('US', tender=False, non_tender=True,
    ...                          start_date=date(2014, 1, 1))
    ['USN', 'USS']
    .. versionadded:: 2.0
    :param territory: the name of the territory to find the currency for
    :param start_date: the start date. If not given today is assumed.
    :param end_date: the end date. If not given the start date is assumed.
    :param tender: controls whether tender currencies should be included.
    :param non_tender: controls whether non-tender currencies should be
                       included.
    :param include_details: if set to `True`, instead of returning currency
                            codes the return value will be dictionaries
                            with detail information.  In that case each
                            dictionary will have the keys ``'currency'``,
                            ``'from'``, ``'to'``, and ``'tender'``.
    """
    currencies = get_global('territory_currencies')
    # Normalize the requested range: default to today, and truncate
    # datetimes down to plain dates so comparisons stay homogeneous.
    if start_date is None:
        start_date = date_.today()
    elif isinstance(start_date, datetime_):
        start_date = start_date.date()
    if end_date is None:
        end_date = start_date
    elif isinstance(end_date, datetime_):
        end_date = end_date.date()
    curs = currencies.get(territory.upper(), ())
    # TODO: validate that the territory exists
    def _is_active(start, end):
        # A currency is active if its validity interval overlaps the
        # requested [start_date, end_date] range; None means open-ended.
        return (start is None or start <= end_date) and \
               (end is None or end >= start_date)
    result = []
    for currency_code, start, end, is_tender in curs:
        # start/end come from the database as (year, month, day) tuples
        # or a false value for an open-ended bound.
        if start:
            start = date_(*start)
        if end:
            end = date_(*end)
        # Keep the entry only if its tender status was requested AND it
        # overlaps the date range.
        if ((is_tender and tender) or \
            (not is_tender and non_tender)) and _is_active(start, end):
            if include_details:
                result.append({
                    'currency': currency_code,
                    'from': start,
                    'to': end,
                    'tender': is_tender,
                })
            else:
                result.append(currency_code)
    return result
def get_decimal_symbol(locale=LC_NUMERIC):
    """Return the symbol used by the locale to separate decimal fractions.
    >>> get_decimal_symbol('en_US')
    u'.'
    :param locale: the `Locale` object or locale identifier
    """
    # Default to a plain period when the locale data has no entry.
    symbols = Locale.parse(locale).number_symbols
    return symbols.get('decimal', u'.')
def get_plus_sign_symbol(locale=LC_NUMERIC):
    """Return the plus sign symbol used by the current locale.
    >>> get_plus_sign_symbol('en_US')
    u'+'
    :param locale: the `Locale` object or locale identifier
    """
    symbols = Locale.parse(locale).number_symbols
    return symbols.get('plusSign', u'+')
def get_minus_sign_symbol(locale=LC_NUMERIC):
    """Return the minus sign symbol used by the current locale.
    >>> get_minus_sign_symbol('en_US')
    u'-'
    :param locale: the `Locale` object or locale identifier
    """
    return Locale.parse(locale).number_symbols.get('minusSign', u'-')
def get_exponential_symbol(locale=LC_NUMERIC):
    """Return the symbol used by the locale to separate mantissa and exponent.
    >>> get_exponential_symbol('en_US')
    u'E'
    :param locale: the `Locale` object or locale identifier
    """
    symbols = Locale.parse(locale).number_symbols
    return symbols.get('exponential', u'E')
def get_group_symbol(locale=LC_NUMERIC):
    """Return the symbol used by the locale to separate groups of thousands.
    >>> get_group_symbol('en_US')
    u','
    :param locale: the `Locale` object or locale identifier
    """
    symbols = Locale.parse(locale).number_symbols
    return symbols.get('group', u',')
def format_number(number, locale=LC_NUMERIC):
    u"""Return the given number formatted for a specific locale.
    >>> format_number(1099, locale='en_US')
    u'1,099'
    >>> format_number(1099, locale='de_DE')
    u'1.099'
    :param number: the number to format
    :param locale: the `Locale` object or locale identifier
    """
    # Thin convenience wrapper: delegate to the decimal formatter.
    return format_decimal(number, locale=locale)
def format_decimal(number, format=None, locale=LC_NUMERIC):
    u"""Return the given decimal number formatted for a specific locale.
    >>> format_decimal(1.2345, locale='en_US')
    u'1.234'
    >>> format_decimal(1.2346, locale='en_US')
    u'1.235'
    >>> format_decimal(-1.2346, locale='en_US')
    u'-1.235'
    >>> format_decimal(1.2345, locale='sv_SE')
    u'1,234'
    >>> format_decimal(1.2345, locale='de')
    u'1,234'
    The appropriate thousands grouping and the decimal separator are used for
    each locale:
    >>> format_decimal(12345.5, locale='en_US')
    u'12,345.5'
    :param number: the number to format
    :param format:
    :param locale: the `Locale` object or locale identifier
    """
    loc = Locale.parse(locale)
    if not format:
        # No explicit pattern given: use the locale's default decimal format.
        format = loc.decimal_formats.get(format)
    return parse_pattern(format).apply(number, loc)
class UnknownCurrencyFormatError(KeyError):
    """Exception raised when an unknown currency format is requested.
    Raised by :func:`format_currency` when ``format_type`` does not name a
    format defined for the locale.
    """
def format_currency(number, currency, format=None, locale=LC_NUMERIC,
                    currency_digits=True, format_type='standard'):
    u"""Return formatted currency value.
    >>> format_currency(1099.98, 'USD', locale='en_US')
    u'$1,099.98'
    >>> format_currency(1099.98, 'USD', locale='es_CO')
    u'US$1.099,98'
    >>> format_currency(1099.98, 'EUR', locale='de_DE')
    u'1.099,98\\xa0\\u20ac'
    The format can also be specified explicitly. The currency is
    placed with the '¤' sign. As the sign gets repeated the format
    expands (¤ being the symbol, ¤¤ is the currency abbreviation and
    ¤¤¤ is the full name of the currency):
    >>> format_currency(1099.98, 'EUR', u'\xa4\xa4 #,##0.00', locale='en_US')
    u'EUR 1,099.98'
    >>> format_currency(1099.98, 'EUR', u'#,##0.00 \xa4\xa4\xa4', locale='en_US')
    u'1,099.98 euros'
    Currencies usually have a specific number of decimal digits. This function
    favours that information over the given format:
    >>> format_currency(1099.98, 'JPY', locale='en_US')
    u'\\xa51,100'
    >>> format_currency(1099.98, 'COP', u'#,##0.00', locale='es_ES')
    u'1.100'
    However, the number of decimal digits can be overridden from the currency
    information, by setting the last parameter to ``False``:
    >>> format_currency(1099.98, 'JPY', locale='en_US', currency_digits=False)
    u'\\xa51,099.98'
    >>> format_currency(1099.98, 'COP', u'#,##0.00', locale='es_ES', currency_digits=False)
    u'1.099,98'
    If a format is not specified the type of currency format to use
    from the locale can be specified:
    >>> format_currency(1099.98, 'EUR', locale='en_US', format_type='standard')
    u'\\u20ac1,099.98'
    When the given currency format type is not available, an exception is
    raised:
    >>> format_currency('1099.98', 'EUR', locale='root', format_type='unknown')
    Traceback (most recent call last):
        ...
    UnknownCurrencyFormatError: "'unknown' is not a known currency format type"
    :param number: the number to format
    :param currency: the currency code
    :param format: the format string to use
    :param locale: the `Locale` object or locale identifier
    :param currency_digits: use the currency's number of decimal digits
    :param format_type: the currency format type to use
    """
    locale = Locale.parse(locale)
    if format:
        # An explicit pattern overrides the locale's currency formats.
        pattern = parse_pattern(format)
    else:
        try:
            pattern = locale.currency_formats[format_type]
        except KeyError:
            raise UnknownCurrencyFormatError("%r is not a known currency format"
                                             " type" % format_type)
    if currency_digits:
        # Prefer the currency's own fraction-digit count over whatever
        # the pattern specifies (e.g. JPY has 0 fraction digits).
        fractions = get_global('currency_fractions')
        try:
            digits = fractions[currency][0]
        except KeyError:
            digits = fractions['DEFAULT'][0]
        frac = (digits, digits)
    else:
        frac = None
    return pattern.apply(number, locale, currency=currency, force_frac=frac)
def format_percent(number, format=None, locale=LC_NUMERIC):
    """Return formatted percent value for a specific locale.
    >>> format_percent(0.34, locale='en_US')
    u'34%'
    >>> format_percent(25.1234, locale='en_US')
    u'2,512%'
    >>> format_percent(25.1234, locale='sv_SE')
    u'2\\xa0512\\xa0%'
    The format pattern can also be specified explicitly:
    >>> format_percent(25.1234, u'#,##0\u2030', locale='en_US')
    u'25,123\u2030'
    :param number: the percent number to format
    :param format:
    :param locale: the `Locale` object or locale identifier
    """
    loc = Locale.parse(locale)
    if not format:
        # No explicit pattern given: use the locale's default percent format.
        format = loc.percent_formats.get(format)
    return parse_pattern(format).apply(number, loc)
def format_scientific(number, format=None, locale=LC_NUMERIC):
    """Return value formatted in scientific notation for a specific locale.
    >>> format_scientific(10000, locale='en_US')
    u'1E4'
    The format pattern can also be specified explicitly:
    >>> format_scientific(1234567, u'##0E00', locale='en_US')
    u'1.23E06'
    :param number: the number to format
    :param format:
    :param locale: the `Locale` object or locale identifier
    """
    loc = Locale.parse(locale)
    if not format:
        # No explicit pattern given: use the locale's scientific format.
        format = loc.scientific_formats.get(format)
    return parse_pattern(format).apply(number, loc)
class NumberFormatError(ValueError):
    """Exception raised when a string cannot be parsed into a number.
    Raised by :func:`parse_number` and :func:`parse_decimal`.
    """
def parse_number(string, locale=LC_NUMERIC):
    """Parse localized number string into an integer.
    >>> parse_number('1,099', locale='en_US')
    1099
    >>> parse_number('1.099', locale='de_DE')
    1099
    When the given string cannot be parsed, an exception is raised:
    >>> parse_number('1.099,98', locale='de')
    Traceback (most recent call last):
        ...
    NumberFormatError: '1.099,98' is not a valid number
    :param string: the string to parse
    :param locale: the `Locale` object or locale identifier
    :return: the parsed number
    :raise `NumberFormatError`: if the string can not be converted to a number
    """
    # Strip the locale's grouping separator and let int() do the rest.
    stripped = string.replace(get_group_symbol(locale), '')
    try:
        return int(stripped)
    except ValueError:
        raise NumberFormatError('%r is not a valid number' % string)
def parse_decimal(string, locale=LC_NUMERIC):
    """Parse localized decimal string into a decimal.
    >>> parse_decimal('1,099.98', locale='en_US')
    Decimal('1099.98')
    >>> parse_decimal('1.099,98', locale='de')
    Decimal('1099.98')
    When the given string cannot be parsed, an exception is raised:
    >>> parse_decimal('2,109,998', locale='de')
    Traceback (most recent call last):
        ...
    NumberFormatError: '2,109,998' is not a valid decimal number
    :param string: the string to parse
    :param locale: the `Locale` object or locale identifier
    :raise NumberFormatError: if the string can not be converted to a
                              decimal number
    """
    locale = Locale.parse(locale)
    # Drop grouping separators and normalize the locale's decimal symbol
    # to '.' before handing the string to Decimal.
    normalized = string.replace(get_group_symbol(locale), '')
    normalized = normalized.replace(get_decimal_symbol(locale), '.')
    try:
        return Decimal(normalized)
    except InvalidOperation:
        raise NumberFormatError('%r is not a valid decimal number' % string)
# Regular expression splitting a number pattern into three named groups:
# an optional prefix (which may contain quoted literal text), the number
# body itself, and a free-form suffix.
PREFIX_END = r'[^0-9@#.,]'
NUMBER_TOKEN = r'[0-9@#.,E+]'
PREFIX_PATTERN = r"(?P<prefix>(?:'[^']*'|%s)*)" % PREFIX_END
NUMBER_PATTERN = r"(?P<number>%s+)" % NUMBER_TOKEN
SUFFIX_PATTERN = r"(?P<suffix>.*)"
number_re = re.compile(r"%s%s%s" % (PREFIX_PATTERN, NUMBER_PATTERN,
                                    SUFFIX_PATTERN))
def parse_grouping(p):
    """Parse primary and secondary digit grouping
    >>> parse_grouping('##')
    (1000, 1000)
    >>> parse_grouping('#,###')
    (3, 3)
    >>> parse_grouping('#,####,###')
    (3, 4)
    """
    width = len(p)
    last_sep = p.rfind(',')
    if last_sep == -1:
        # No separator at all: use a group size so large that grouping
        # effectively never kicks in.
        return 1000, 1000
    primary = width - last_sep - 1
    prev_sep = p[:-primary - 1].rfind(',')
    if prev_sep == -1:
        # Only one separator: secondary group size equals the primary one.
        return primary, primary
    secondary = width - primary - prev_sep - 2
    return primary, secondary
def parse_pattern(pattern):
    """Parse number format patterns into a :class:`NumberPattern`.
    Accepts an already-parsed :class:`NumberPattern` and returns it
    unchanged.
    """
    if isinstance(pattern, NumberPattern):
        return pattern
    def _match_number(pattern):
        # Split a (sub)pattern into (prefix, number, suffix) groups.
        rv = number_re.search(pattern)
        if rv is None:
            raise ValueError('Invalid number pattern %r' % pattern)
        return rv.groups()
    # Do we have a negative subpattern?
    if ';' in pattern:
        pattern, neg_pattern = pattern.split(';', 1)
        pos_prefix, number, pos_suffix = _match_number(pattern)
        # Only prefix/suffix come from the negative subpattern; the digit
        # layout is taken from the positive one.
        neg_prefix, _, neg_suffix = _match_number(neg_pattern)
    else:
        pos_prefix, number, pos_suffix = _match_number(pattern)
        # No explicit negative subpattern: derive it by prepending '-'.
        neg_prefix = '-' + pos_prefix
        neg_suffix = pos_suffix
    if 'E' in number:
        # Scientific notation: split off the exponent specification.
        number, exp = number.split('E', 1)
    else:
        exp = None
    if '@' in number:
        if '.' in number and '0' in number:
            raise ValueError('Significant digit patterns can not contain '
                             '"@" or "0"')
    if '.' in number:
        integer, fraction = number.rsplit('.', 1)
    else:
        integer = number
        fraction = ''
    def parse_precision(p):
        """Calculate the min and max allowed digits"""
        min = max = 0
        for c in p:
            if c in '@0':
                # '@' (significant digit) and '0' both require a digit.
                min += 1
                max += 1
            elif c == '#':
                max += 1
            elif c == ',':
                continue
            else:
                break
        return min, max
    int_prec = parse_precision(integer)
    frac_prec = parse_precision(fraction)
    if exp:
        # For scientific patterns the whole mantissa acts as the
        # significant-digit specification.
        frac_prec = parse_precision(integer+fraction)
        exp_plus = exp.startswith('+')
        exp = exp.lstrip('+')
        exp_prec = parse_precision(exp)
    else:
        exp_plus = None
        exp_prec = None
    grouping = parse_grouping(integer)
    return NumberPattern(pattern, (pos_prefix, neg_prefix),
                         (pos_suffix, neg_suffix), grouping,
                         int_prec, frac_prec,
                         exp_prec, exp_plus)
class NumberPattern(object):
    """Compiled representation of a number format pattern.
    Instances are produced by :func:`parse_pattern`; :meth:`apply` performs
    the actual formatting of a value for a locale.
    """
    def __init__(self, pattern, prefix, suffix, grouping,
                 int_prec, frac_prec, exp_prec, exp_plus):
        self.pattern = pattern      # the original pattern string
        self.prefix = prefix        # (positive, negative) prefixes
        self.suffix = suffix        # (positive, negative) suffixes
        self.grouping = grouping    # (primary, secondary) group sizes
        self.int_prec = int_prec    # (min, max) integer digits
        self.frac_prec = frac_prec  # (min, max) fraction digits
        self.exp_prec = exp_prec    # (min, max) exponent digits, or None
        self.exp_plus = exp_plus    # show '+' on non-negative exponents
        # Percent/permille patterns implicitly scale the value by 10**2
        # and 10**3 respectively before formatting.
        if '%' in ''.join(self.prefix + self.suffix):
            self.scale = 2
        elif u'‰' in ''.join(self.prefix + self.suffix):
            self.scale = 3
        else:
            self.scale = 0
    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self.pattern)
    def apply(self, value, locale, currency=None, force_frac=None):
        """Format ``value`` for ``locale`` and return a unicode string.
        :param currency: currency code substituted for ¤ placeholders
        :param force_frac: optional (min, max) fraction digits overriding
                           the pattern's own fraction precision
        """
        frac_prec = force_frac or self.frac_prec
        if not isinstance(value, Decimal):
            # Convert through str() so binary floats keep their printed
            # representation rather than their exact binary expansion.
            value = Decimal(str(value))
        value = value.scaleb(self.scale)
        is_negative = int(value.is_signed())
        if self.exp_prec: # Scientific notation
            exp = value.adjusted()
            value = abs(value)
            # Minimum number of integer digits
            if self.int_prec[0] == self.int_prec[1]:
                exp -= self.int_prec[0] - 1
            # Exponent grouping
            elif self.int_prec[1]:
                exp = int(exp / self.int_prec[1]) * self.int_prec[1]
            # Rescale the mantissa to match the chosen exponent.
            if exp < 0:
                value = value * 10**(-exp)
            else:
                value = value / 10**exp
            exp_sign = ''
            if exp < 0:
                exp_sign = get_minus_sign_symbol(locale)
            elif self.exp_plus:
                exp_sign = get_plus_sign_symbol(locale)
            exp = abs(exp)
            number = u'%s%s%s%s' % \
                (self._format_significant(value, frac_prec[0], frac_prec[1]),
                 get_exponential_symbol(locale), exp_sign,
                 self._format_int(str(exp), self.exp_prec[0],
                                  self.exp_prec[1], locale))
        elif '@' in self.pattern: # Is it a significant digits pattern?
            text = self._format_significant(abs(value),
                                            self.int_prec[0],
                                            self.int_prec[1])
            a, sep, b = text.partition(".")
            number = self._format_int(a, 0, 1000, locale)
            if sep:
                number += get_decimal_symbol(locale) + b
        else: # A normal number pattern
            # Quantize to the maximum fraction digits using banker's rounding.
            precision = Decimal('1.' + '1' * frac_prec[1])
            rounded = value.quantize(precision, ROUND_HALF_EVEN)
            a, sep, b = str(abs(rounded)).partition(".")
            number = (self._format_int(a, self.int_prec[0],
                                       self.int_prec[1], locale) +
                      self._format_frac(b or '0', locale, force_frac))
        retval = u'%s%s%s' % (self.prefix[is_negative], number,
                              self.suffix[is_negative])
        if u'¤' in retval:
            # Currency placeholders: ¤¤¤ -> full name, ¤¤ -> code,
            # ¤ -> symbol (replaced longest-first).
            retval = retval.replace(u'¤¤¤',
                                    get_currency_name(currency, value, locale))
            retval = retval.replace(u'¤¤', currency.upper())
            retval = retval.replace(u'¤', get_currency_symbol(currency, locale))
        return retval
    #
    # This is one tricky piece of code. The idea is to rely as much as possible
    # on the decimal module to minimize the amount of code.
    #
    # Conceptually, the implementation of this method can be summarized in the
    # following steps:
    #
    # - Move or shift the decimal point (i.e. the exponent) so the maximum
    # amount of significant digits fall into the integer part (i.e. to the
    # left of the decimal point)
    #
    # - Round the number to the nearest integer, discarding all the fractional
    # part which contained extra digits to be eliminated
    #
    # - Convert the rounded integer to a string, that will contain the final
    # sequence of significant digits already trimmed to the maximum
    #
    # - Restore the original position of the decimal point, potentially
    # padding with zeroes on either side
    #
    def _format_significant(self, value, minimum, maximum):
        # Format ``value`` with between ``minimum`` and ``maximum``
        # significant digits (see the algorithm description above).
        exp = value.adjusted()
        scale = maximum - 1 - exp
        digits = str(value.scaleb(scale).quantize(Decimal(1), ROUND_HALF_EVEN))
        if scale <= 0:
            result = digits + '0' * -scale
        else:
            intpart = digits[:-scale]
            i = len(intpart)
            j = i + max(minimum - i, 0)
            result = "{intpart}.{pad:0<{fill}}{fracpart}{fracextra}".format(
                intpart=intpart or '0',
                pad='',
                fill=-min(exp + 1, 0),
                fracpart=digits[i:j],
                fracextra=digits[j:].rstrip('0'),
            ).rstrip('.')
        return result
    def _format_int(self, value, min, max, locale):
        # Pad the integer digit string to ``min`` digits and insert the
        # locale's group separators per self.grouping.
        # NOTE: ``min``/``max`` shadow the builtins; kept as-is.
        width = len(value)
        if width < min:
            value = '0' * (min - width) + value
        gsize = self.grouping[0]
        ret = ''
        symbol = get_group_symbol(locale)
        while len(value) > gsize:
            ret = symbol + value[-gsize:] + ret
            value = value[:-gsize]
            # After the first group, switch to the secondary group size.
            gsize = self.grouping[1]
        return value + ret
    def _format_frac(self, value, locale, force_frac=None):
        # Pad/trim the fraction digit string and prepend the locale's
        # decimal symbol; returns '' when no fraction should be shown.
        min, max = force_frac or self.frac_prec
        if len(value) < min:
            value += ('0' * (min - len(value)))
        if max == 0 or (min == 0 and int(value) == 0):
            return ''
        width = len(value)  # NOTE: unused; left for byte-compatibility
        while len(value) > min and value[-1] == '0':
            value = value[:-1]
        return get_decimal_symbol(locale) + value
|
cobalys/django | refs/heads/master | django/contrib/gis/geometry/backend/__init__.py | 128 | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
# Resolve the geometry backend at import time; default to GEOS.
geom_backend = getattr(settings, 'GEOMETRY_BACKEND', 'geos')
try:
    # First try a backend bundled with django.contrib.gis.
    module = import_module('.%s' % geom_backend, 'django.contrib.gis.geometry.backend')
except ImportError:
    try:
        # Fall back to treating the setting as a user-supplied dotted path.
        module = import_module(geom_backend)
    except ImportError:
        raise ImproperlyConfigured('Could not import user-defined GEOMETRY_BACKEND '
                                   '"%s".' % geom_backend)
try:
    # Every backend module must expose these two names.
    Geometry = module.Geometry
    GeometryException = module.GeometryException
except AttributeError:
    raise ImproperlyConfigured('Cannot import Geometry from the "%s" '
                               'geometry backend.' % geom_backend)
|
obsh/tornado | refs/heads/master | tornado/test/websocket_test.py | 1 | from __future__ import absolute_import, division, print_function, with_statement
import traceback
from tornado.concurrent import Future
from tornado import gen
from tornado.httpclient import HTTPError, HTTPRequest
from tornado.log import gen_log, app_log
from tornado.testing import AsyncHTTPTestCase, gen_test, bind_unused_port, ExpectLog
from tornado.test.util import unittest
from tornado.web import Application, RequestHandler
try:
import tornado.websocket # noqa
from tornado.util import _websocket_mask_python
except ImportError:
# The unittest module presents misleading errors on ImportError
# (it acts as if websocket_test could not be found, hiding the underlying
# error). If we get an ImportError here (which could happen due to
# TORNADO_EXTENSION=1), print some extra information before failing.
traceback.print_exc()
raise
from tornado.websocket import WebSocketHandler, websocket_connect, WebSocketError
try:
from tornado import speedups
except ImportError:
speedups = None
class TestWebSocketHandler(WebSocketHandler):
    """Base class for testing handlers that exposes the on_close event.
    This allows for deterministic cleanup of the associated socket.
    """
    def initialize(self, close_future, compression_options=None):
        # close_future is resolved with (close_code, close_reason) when
        # the server side of the connection closes.
        self.close_future = close_future
        self.compression_options = compression_options
    def get_compression_options(self):
        return self.compression_options
    def on_close(self):
        self.close_future.set_result((self.close_code, self.close_reason))
class EchoHandler(TestWebSocketHandler):
    """Echoes each message back, preserving text/binary framing."""
    def on_message(self, message):
        self.write_message(message, isinstance(message, bytes))
class ErrorInOnMessageHandler(TestWebSocketHandler):
    """Raises ZeroDivisionError from on_message to exercise error logging."""
    def on_message(self, message):
        1 / 0
class HeaderHandler(TestWebSocketHandler):
    """Echoes the X-Test request header back as the first message."""
    def open(self):
        try:
            # In a websocket context, many RequestHandler methods
            # raise RuntimeErrors.
            self.set_status(503)
            raise Exception("did not get expected exception")
        except RuntimeError:
            pass
        self.write_message(self.request.headers.get('X-Test', ''))
class NonWebSocketHandler(RequestHandler):
    """Plain HTTP handler, used to test websocket connects to non-WS URLs."""
    def get(self):
        self.write('ok')
class CloseReasonHandler(TestWebSocketHandler):
    """Closes the connection immediately with a specific code and reason."""
    def open(self):
        self.on_close_called = False
        self.close(1001, "goodbye")
class AsyncPrepareHandler(TestWebSocketHandler):
    """Echo handler whose prepare() is an asynchronous coroutine."""
    @gen.coroutine
    def prepare(self):
        yield gen.moment
    def on_message(self, message):
        self.write_message(message)
class PathArgsHandler(TestWebSocketHandler):
    """Echoes the captured URL path group back as the first message."""
    def open(self, arg):
        self.write_message(arg)
class HandshakeResponseHeadersHandler(TestWebSocketHandler):
    """Adds an extra header to the websocket handshake response."""
    def prepare(self):
        self._headers['Set-Cookie'] = 'hello=1'
class WebSocketBaseTestCase(AsyncHTTPTestCase):
    """Shared client-side helpers for the websocket test cases."""
    @gen.coroutine
    def ws_connect(self, path, compression_options=None):
        # Open a client websocket connection to ``path`` on the test server.
        ws = yield websocket_connect(
            'ws://127.0.0.1:%d%s' % (self.get_http_port(), path),
            compression_options=compression_options)
        raise gen.Return(ws)
    @gen.coroutine
    def close(self, ws):
        """Close a websocket connection and wait for the server side.
        If we don't wait here, there are sometimes leak warnings in the
        tests.
        """
        ws.close()
        yield self.close_future
class WebSocketTest(WebSocketBaseTestCase):
    """End-to-end tests of the websocket handshake, messaging, close
    codes/reasons, and Origin-header checking.
    """
    def get_app(self):
        self.close_future = Future()
        return Application([
            ('/echo', EchoHandler, dict(close_future=self.close_future)),
            ('/non_ws', NonWebSocketHandler),
            ('/header', HeaderHandler, dict(close_future=self.close_future)),
            ('/close_reason', CloseReasonHandler,
             dict(close_future=self.close_future)),
            ('/error_in_on_message', ErrorInOnMessageHandler,
             dict(close_future=self.close_future)),
            ('/async_prepare', AsyncPrepareHandler,
             dict(close_future=self.close_future)),
            ('/path_args/(.*)', PathArgsHandler,
             dict(close_future=self.close_future)),
            ('/handshake_response_headers', HandshakeResponseHeadersHandler,
             dict(close_future=self.close_future)),
        ])
    def test_http_request(self):
        # WS server, HTTP client.
        response = self.fetch('/echo')
        self.assertEqual(response.code, 400)
        self.assertEqual(response.headers.get('Content-Type'),
                         "text/html; charset=UTF-8")
    @gen_test
    def test_websocket_gen(self):
        ws = yield self.ws_connect('/echo')
        yield ws.write_message('hello')
        response = yield ws.read_message()
        self.assertEqual(response, 'hello')
        yield self.close(ws)
    def test_websocket_callbacks(self):
        # Same round trip as test_websocket_gen, via the callback interface.
        websocket_connect(
            'ws://127.0.0.1:%d/echo' % self.get_http_port(),
            io_loop=self.io_loop, callback=self.stop)
        ws = self.wait().result()
        ws.write_message('hello')
        ws.read_message(self.stop)
        response = self.wait().result()
        self.assertEqual(response, 'hello')
        self.close_future.add_done_callback(lambda f: self.stop())
        ws.close()
        self.wait()
    @gen_test
    def test_binary_message(self):
        ws = yield self.ws_connect('/echo')
        ws.write_message(b'hello \xe9', binary=True)
        response = yield ws.read_message()
        self.assertEqual(response, b'hello \xe9')
        yield self.close(ws)
    @gen_test
    def test_unicode_message(self):
        ws = yield self.ws_connect('/echo')
        ws.write_message(u'hello \u00e9')
        response = yield ws.read_message()
        self.assertEqual(response, u'hello \u00e9')
        yield self.close(ws)
    @gen_test
    def test_error_in_on_message(self):
        ws = yield self.ws_connect('/error_in_on_message')
        ws.write_message('hello')
        with ExpectLog(app_log, "Uncaught exception"):
            response = yield ws.read_message()
        self.assertIs(response, None)
        yield self.close(ws)
    @gen_test
    def test_websocket_http_fail(self):
        with self.assertRaises(HTTPError) as cm:
            yield self.ws_connect('/notfound')
        self.assertEqual(cm.exception.code, 404)
        self.assertEqual(cm.exception.response.headers.get('Content-Type'),
                         "text/html; charset=UTF-8")
    @gen_test
    def test_websocket_http_success(self):
        # Connecting to a non-websocket URL must raise WebSocketError.
        with self.assertRaises(WebSocketError):
            yield self.ws_connect('/non_ws')
    @gen_test
    def test_websocket_network_fail(self):
        # Bind and immediately release a port so the connect is refused.
        sock, port = bind_unused_port()
        sock.close()
        with self.assertRaises(IOError):
            with ExpectLog(gen_log, ".*"):
                yield websocket_connect(
                    'ws://127.0.0.1:%d/' % port,
                    io_loop=self.io_loop,
                    connect_timeout=3600)
    @gen_test
    def test_websocket_close_buffered_data(self):
        ws = yield websocket_connect(
            'ws://127.0.0.1:%d/echo' % self.get_http_port())
        ws.write_message('hello')
        ws.write_message('world')
        # Close the underlying stream.
        ws.stream.close()
        yield self.close_future
    @gen_test
    def test_websocket_headers(self):
        # Ensure that arbitrary headers can be passed through websocket_connect.
        ws = yield websocket_connect(
            HTTPRequest('ws://127.0.0.1:%d/header' % self.get_http_port(),
                        headers={'X-Test': 'hello'}))
        response = yield ws.read_message()
        self.assertEqual(response, 'hello')
        yield self.close(ws)
    @gen_test
    def test_server_close_reason(self):
        ws = yield self.ws_connect('/close_reason')
        msg = yield ws.read_message()
        # A message of None means the other side closed the connection.
        self.assertIs(msg, None)
        self.assertEqual(ws.close_code, 1001)
        self.assertEqual(ws.close_reason, "goodbye")
        # The on_close callback is called no matter which side closed.
        code, reason = yield self.close_future
        # The client echoed the close code it received to the server,
        # so the server's close code (returned via close_future) is
        # the same.
        self.assertEqual(code, 1001)
    @gen_test
    def test_client_close_reason(self):
        ws = yield self.ws_connect('/echo')
        ws.close(1001, 'goodbye')
        code, reason = yield self.close_future
        self.assertEqual(code, 1001)
        self.assertEqual(reason, 'goodbye')
    @gen_test
    def test_async_prepare(self):
        # Previously, an async prepare method triggered a bug that would
        # result in a timeout on test shutdown (and a memory leak).
        ws = yield self.ws_connect('/async_prepare')
        ws.write_message('hello')
        res = yield ws.read_message()
        self.assertEqual(res, 'hello')
    @gen_test
    def test_path_args(self):
        ws = yield self.ws_connect('/path_args/hello')
        res = yield ws.read_message()
        self.assertEqual(res, 'hello')
    @gen_test
    def test_check_origin_valid_no_path(self):
        port = self.get_http_port()
        url = 'ws://127.0.0.1:%d/echo' % port
        headers = {'Origin': 'http://127.0.0.1:%d' % port}
        ws = yield websocket_connect(HTTPRequest(url, headers=headers),
                                     io_loop=self.io_loop)
        ws.write_message('hello')
        response = yield ws.read_message()
        self.assertEqual(response, 'hello')
        yield self.close(ws)
    @gen_test
    def test_check_origin_valid_with_path(self):
        port = self.get_http_port()
        url = 'ws://127.0.0.1:%d/echo' % port
        headers = {'Origin': 'http://127.0.0.1:%d/something' % port}
        ws = yield websocket_connect(HTTPRequest(url, headers=headers),
                                     io_loop=self.io_loop)
        ws.write_message('hello')
        response = yield ws.read_message()
        self.assertEqual(response, 'hello')
        yield self.close(ws)
    @gen_test
    def test_check_origin_invalid_partial_url(self):
        # An Origin without a scheme must be rejected.
        port = self.get_http_port()
        url = 'ws://127.0.0.1:%d/echo' % port
        headers = {'Origin': '127.0.0.1:%d' % port}
        with self.assertRaises(HTTPError) as cm:
            yield websocket_connect(HTTPRequest(url, headers=headers),
                                    io_loop=self.io_loop)
        self.assertEqual(cm.exception.code, 403)
        self.assertEqual(cm.exception.response.headers.get('Content-Type'),
                         "text/html; charset=UTF-8")
    @gen_test
    def test_check_origin_invalid(self):
        port = self.get_http_port()
        url = 'ws://127.0.0.1:%d/echo' % port
        # Host is 127.0.0.1, which should not be accessible from some other
        # domain
        headers = {'Origin': 'http://somewhereelse.com'}
        with self.assertRaises(HTTPError) as cm:
            yield websocket_connect(HTTPRequest(url, headers=headers),
                                    io_loop=self.io_loop)
        self.assertEqual(cm.exception.code, 403)
        self.assertEqual(cm.exception.response.headers.get('Content-Type'),
                         "text/html; charset=UTF-8")
    @gen_test
    def test_check_origin_invalid_subdomains(self):
        port = self.get_http_port()
        url = 'ws://localhost:%d/echo' % port
        # Subdomains should be disallowed by default. If we could pass a
        # resolver to websocket_connect we could test sibling domains as well.
        headers = {'Origin': 'http://subtenant.localhost'}
        with self.assertRaises(HTTPError) as cm:
            yield websocket_connect(HTTPRequest(url, headers=headers),
                                    io_loop=self.io_loop)
        self.assertEqual(cm.exception.code, 403)
        # check default Content-Type is set
        self.assertEqual(cm.exception.response.headers.get('Content-Type'),
                         "text/html; charset=UTF-8")
    @gen_test
    def test_success_response_content_type(self):
        ws = yield self.ws_connect('/echo')
        self.assertIsNone(ws.headers.get('Content-Type'))
    @gen_test
    def test_handshake_response_headers(self):
        ws = yield self.ws_connect('/handshake_response_headers')
        self.assertEqual(ws.headers.get('Set-Cookie'), 'hello=1')
    def test_handshake_response_headers_error(self):
        # WS server, HTTP client.
        response = self.fetch('/handshake_response_headers')
        self.assertEqual(response.code, 400)
        # extra headers should not be passed on error
        self.assertIsNone(response.headers.get('Set-Cookie'))
        self.assertEqual(response.headers.get('Content-Type'),
                         "text/html; charset=UTF-8")
class CompressionTestMixin(object):
    """Shared machinery for the compression tests; subclasses choose the
    server/client compression options and verify the wire byte counts.
    """
    MESSAGE = 'Hello world. Testing 123 123'
    def get_app(self):
        self.close_future = Future()
        return Application([
            ('/echo', EchoHandler, dict(
                close_future=self.close_future,
                compression_options=self.get_server_compression_options())),
        ])
    def get_server_compression_options(self):
        # None leaves compression disabled; subclasses override.
        return None
    def get_client_compression_options(self):
        return None
    @gen_test
    def test_message_sizes(self):
        ws = yield self.ws_connect(
            '/echo',
            compression_options=self.get_client_compression_options())
        # Send the same message three times so we can measure the
        # effect of the context_takeover options.
        for i in range(3):
            ws.write_message(self.MESSAGE)
            response = yield ws.read_message()
            self.assertEqual(response, self.MESSAGE)
        self.assertEqual(ws.protocol._message_bytes_out, len(self.MESSAGE) * 3)
        self.assertEqual(ws.protocol._message_bytes_in, len(self.MESSAGE) * 3)
        self.verify_wire_bytes(ws.protocol._wire_bytes_in,
                               ws.protocol._wire_bytes_out)
        yield self.close(ws)
class UncompressedTestMixin(CompressionTestMixin):
    """Specialization of CompressionTestMixin when we expect no compression."""
    def verify_wire_bytes(self, bytes_in, bytes_out):
        # Bytes out includes the 4-byte mask key per message.
        self.assertEqual(bytes_out, 3 * (len(self.MESSAGE) + 6))
        self.assertEqual(bytes_in, 3 * (len(self.MESSAGE) + 2))
class NoCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
    """Neither side requests compression."""
    pass
# If only one side tries to compress, the extension is not negotiated.
class ServerOnlyCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
    """Only the server offers compression, so none is negotiated."""
    def get_server_compression_options(self):
        return {}
class ClientOnlyCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
    """Only the client offers compression, so none is negotiated."""
    def get_client_compression_options(self):
        return {}
class DefaultCompressionTest(CompressionTestMixin, WebSocketBaseTestCase):
    """Both sides enable compression with default options."""
    def get_server_compression_options(self):
        return {}
    def get_client_compression_options(self):
        return {}
    def verify_wire_bytes(self, bytes_in, bytes_out):
        # With compression on, wire traffic must shrink below the
        # uncompressed frame sizes in both directions.
        self.assertLess(bytes_out, 3 * (len(self.MESSAGE) + 6))
        self.assertLess(bytes_in, 3 * (len(self.MESSAGE) + 2))
        # Bytes out includes the 4 bytes mask key per message.
        self.assertEqual(bytes_out, bytes_in + 12)
class MaskFunctionMixin(object):
    """Shared test cases for the websocket frame-masking XOR function."""
    # Subclasses should define self.mask(mask, data)
    def test_mask(self):
        self.assertEqual(self.mask(b'abcd', b''), b'')
        self.assertEqual(self.mask(b'abcd', b'b'), b'\x03')
        self.assertEqual(self.mask(b'abcd', b'54321'), b'TVPVP')
        self.assertEqual(self.mask(b'ZXCV', b'98765432'), b'c`t`olpd')
        # Include test cases with \x00 bytes (to ensure that the C
        # extension isn't depending on null-terminated strings) and
        # bytes with the high bit set (to smoke out signedness issues).
        self.assertEqual(self.mask(b'\x00\x01\x02\x03',
                                   b'\xff\xfb\xfd\xfc\xfe\xfa'),
                         b'\xff\xfa\xff\xff\xfe\xfb')
        self.assertEqual(self.mask(b'\xff\xfb\xfd\xfc',
                                   b'\x00\x01\x02\x03\x04\x05'),
                         b'\xff\xfa\xff\xff\xfb\xfe')
class PythonMaskFunctionTest(MaskFunctionMixin, unittest.TestCase):
    """Runs the mask tests against the pure-Python implementation."""
    def mask(self, mask, data):
        return _websocket_mask_python(mask, data)
@unittest.skipIf(speedups is None, "tornado.speedups module not present")
class CythonMaskFunctionTest(MaskFunctionMixin, unittest.TestCase):
    """Runs the mask tests against the compiled speedups implementation."""
    def mask(self, mask, data):
        return speedups.websocket_mask(mask, data)
|
speed-of-light/pyslider | refs/heads/master | lib/exp/tools/slider.py | 1 | import cv2
from ..summary import Summary
from pdf_reader import PdfReader
class Slider(PdfReader):
    """
    A helper to get slide information

    Wraps PdfReader to load rendered slide JPEGs by 1-based slide id,
    substituting a red-cross placeholder for out-of-range ids.
    """
    def __init__(self, root="", name=""):
        # Root directory and dataset name are stored by the base class.
        PdfReader.__init__(self, root, name)
    def __blank_slide(self, gray=False):
        # Build a placeholder: a white image sized like slide #1 with a
        # red X drawn corner to corner; used for invalid slide ids.
        sp = self.slides_path()
        img = cv2.imread("{}/{:03d}.jpg".format(sp, 1))
        img[:] = (255, 255, 255) # fill white
        # red cross
        red = (0, 0, 255)
        line_width = int(0.1*img.shape[0])
        # NOTE(review): the *R/*L names on the second diagonal are swapped
        # in (x, y) terms (topR is really the bottom-left corner), but the
        # two cv2.line calls still draw the intended X shape.
        topL = (0, 0)
        botR = (img.shape[1], img.shape[0])
        topR = (0, img.shape[0])
        botL = (img.shape[1], 0)
        cv2.line(img, topL, botR, red, line_width)
        cv2.line(img, topR, botL, red, line_width)
        if gray:
            # NOTE(review): img[0] is the first *row* of the image, not a
            # grayscale conversion -- presumably cv2.cvtColor was intended.
            # Confirm against callers before changing.
            return img[0]
        else:
            return img
    def __is_valid_sid(self, index, count):
        # Slide ids are 1-based: valid range is [1, count].
        return (index > 0 and index < count+1)
    def __img_path(self, idx):
        # Path of the full-resolution ("big") JPEG for slide `idx`.
        sp = self.slides_path(size='big')
        return "{}/{:03d}.jpg".format(sp, idx)
    def __info(self):
        # Dataset summary record (provides n_slides, v_width, v_height).
        su = Summary()
        sin = su.info(self.root, self.name)
        return sin
    def slide_pages(self):
        """Return the page count of the source PDF."""
        ps = PdfReader(self.root, self.name)
        return ps.pages()
    def slides_path(self, size='mid'):
        """Return the directory of rendered slide images for `size`."""
        ps = PdfReader(self.root, self.name)
        return ps.slides_path(size)
    def __make_img(self, idx, itop, path, gray):
        # Load slide `idx` (valid ids 1..itop); out-of-range ids get the
        # red-cross placeholder instead.
        if self.__is_valid_sid(idx, itop):
            if gray:
                # NOTE(review): cv2.COLOR_GRAY2BGR is a cvtColor code, not
                # an imread flag -- cv2.IMREAD_GRAYSCALE was probably
                # intended. Verify before changing (it alters the shape of
                # the returned array).
                img = cv2.imread(path, cv2.COLOR_GRAY2BGR)
            else:
                img = cv2.imread(path)
        else:
            img = self.__blank_slide(gray)
        return img
    def get_slides(self, ids=[], gray=False, resize=None):
        """
        Get slide images collection
        use img[:, :, [2, 1, 0]] to convert for matplotlib

        Yields dicts of the form ``dict(img=<ndarray>, idx=<slide id>)``.
        NOTE(review): the default ``ids=[]`` yields nothing; pass
        ``ids=None`` to iterate all slides. ``resize=True`` resizes to the
        video frame size taken from the summary info.
        """
        sin = self.__info()
        if ids is None:
            ids = range(1, sin.n_slides+1)
        if resize is True:
            resize = (sin.v_width, sin.v_height)
        for si in ids:
            sp = self.__img_path(si)
            img = self.__make_img(si, sin.n_slides, sp, gray)
            if resize is not None:
                img = cv2.resize(img, resize)
            yield(dict(img=img, idx=si))
|
haeusser/tensorflow | refs/heads/master | tensorflow/python/kernel_tests/random_ops_test.py | 39 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.random_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
class RandomNormalTest(test.TestCase):
  """Tests for random_ops.random_normal: distinctness across runs,
  CPU/GPU parity, seed reproducibility, and CSE behavior."""

  def _Sampler(self, num, mu, sigma, dtype, use_gpu, seed=None):
    # Returns a parameterless closure; each call builds a fresh graph and
    # returns a [10, num] array (10 sess.run calls on one sampling op).

    def func():
      with self.test_session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
        rng = random_ops.random_normal(
            [num], mean=mu, stddev=sigma, dtype=dtype, seed=seed)
        ret = np.empty([10, num])
        for i in xrange(10):
          ret[i, :] = sess.run(rng)
      return ret

    return func

  # Asserts that different trials (1000 samples per trial) are unlikely
  # to see the same sequence of values. Will catch buggy
  # implementations which use the same random number seed.
  def testDistinct(self):
    for dt in dtypes.float16, dtypes.float32, dtypes.float64:
      sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=True)
      x = sampler()
      y = sampler()
      # Number of positions at which the two unseeded draws coincide;
      # should be essentially zero.
      count = (x == y).sum()
      if count >= 10:
        print("x = ", x)
        print("y = ", y)
        print("count = ", count)
      self.assertTrue(count < 10)

  # Checks that the CPU and GPU implementation returns the same results,
  # given the same random seed
  def testCPUGPUMatch(self):
    for dt in dtypes.float16, dtypes.float32, dtypes.float64:
      results = {}
      for use_gpu in [False, True]:
        sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=12345)
        results[use_gpu] = sampler()
      if dt == dtypes.float16:
        # float16 gets a looser tolerance.
        self.assertAllClose(results[False], results[True], rtol=1e-3, atol=1e-3)
      else:
        self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)

  def testSeed(self):
    # Identical seeds must reproduce identical sample sequences.
    for dt in dtypes.float16, dtypes.float32, dtypes.float64:
      sx = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=True, seed=345)
      sy = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=True, seed=345)
      self.assertAllEqual(sx(), sy())

  def testNoCSE(self):
    # Two identical random ops must not be merged by common-subexpression
    # elimination: their outputs should differ.
    for use_gpu in [False, True]:
      with self.test_session(use_gpu=use_gpu):
        shape = [2, 3, 4]
        rnd1 = random_ops.random_normal(shape, 0.0, 1.0, dtypes.float32)
        rnd2 = random_ops.random_normal(shape, 0.0, 1.0, dtypes.float32)
        diff = rnd2 - rnd1
        self.assertTrue(np.linalg.norm(diff.eval()) > 0.1)
class TruncatedNormalTest(test.TestCase):
  """Tests for random_ops.truncated_normal."""

  def _Sampler(self, num, mu, sigma, dtype, use_gpu, seed=None):
    # Returns a parameterless closure; each call builds a fresh graph and
    # returns a [10, num] array (10 sess.run calls on one sampling op).

    def func():
      with self.test_session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
        rng = random_ops.truncated_normal(
            [num], mean=mu, stddev=sigma, dtype=dtype, seed=seed)
        ret = np.empty([10, num])
        for i in xrange(10):
          ret[i, :] = sess.run(rng)
      return ret

    return func

  # Asserts that different trials (1000 samples per trial) are unlikely
  # to see the same sequence of values. Will catch buggy
  # implementations which use the same random number seed.
  def testDistinct(self):
    # NOTE: TruncatedNormal on GPU is not supported.
    if not test.is_gpu_available():
      for dt in dtypes.float16, dtypes.float32, dtypes.float64:
        sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=False)
        x = sampler()
        y = sampler()
        # Number of positions at which the two unseeded draws coincide.
        count = (x == y).sum()
        if count >= 10:
          print("x = ", x)
          print("y = ", y)
          print("count = ", count)
        self.assertTrue(count < 10)

  # Checks that the CPU and GPU implementation returns the same results,
  # given the same random seed
  def testCPUGPUMatch(self):
    # Skip the test if there is no GPU.
    if not test.is_gpu_available():
      return
    for dt in dtypes.float16, dtypes.float32, dtypes.float64:
      results = {}
      for use_gpu in [False, True]:
        # We need a particular larger number of samples to test multiple rounds
        # on GPU
        sampler = self._Sampler(
            200000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=12345)
        results[use_gpu] = sampler()
      if dt == dtypes.float16:
        self.assertAllClose(results[False], results[True], rtol=1e-3, atol=1e-3)
      else:
        self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)

  def testSeed(self):
    # Identical seeds must reproduce identical sample sequences.
    for dt in dtypes.float16, dtypes.float32, dtypes.float64:
      sx = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=True, seed=345)
      sy = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=True, seed=345)
      self.assertAllEqual(sx(), sy())

  # The effective standard deviation of truncated normal is 85% of the
  # requested one.
  def testStdDev(self):
    for dt in dtypes.float16, dtypes.float32, dtypes.float64:
      stddev = 3.0
      sampler = self._Sampler(100000, 0.0, stddev, dt, use_gpu=True)
      x = sampler()
      print("std(x)", np.std(x), abs(np.std(x) / stddev - 0.85))
      self.assertTrue(abs(np.std(x) / stddev - 0.85) < 0.04)

  def testNoCSE(self):
    # Two identical random ops must not be merged by common-subexpression
    # elimination.
    with self.test_session(use_gpu=True):
      shape = [2, 3, 4]
      rnd1 = random_ops.truncated_normal(shape, 0.0, 1.0, dtypes.float32)
      rnd2 = random_ops.truncated_normal(shape, 0.0, 1.0, dtypes.float32)
      diff = rnd2 - rnd1
      self.assertTrue(np.linalg.norm(diff.eval()) > 0.1)
class RandomUniformTest(test.TestCase):
  """Tests for random_ops.random_uniform over float and integer dtypes."""

  def _Sampler(self, num, minv, maxv, dtype, use_gpu, seed=None):
    # Returns a parameterless closure; each call builds a fresh graph and
    # returns a [10, num] array (10 sess.run calls on one sampling op).

    def func():
      with self.test_session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
        rng = random_ops.random_uniform(
            [num], minval=minv, maxval=maxv, dtype=dtype, seed=seed)
        ret = np.empty([10, num])
        for i in xrange(10):
          ret[i, :] = sess.run(rng)
      return ret

    return func

  def testRange(self):
    # Samples must lie in the half-open interval [minval, maxval).
    for dt in dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64:
      sampler = self._Sampler(1000, minv=-2, maxv=8, dtype=dt, use_gpu=True)
      x = sampler()
      self.assertTrue(-2 <= np.min(x))
      self.assertTrue(np.max(x) < 8)

  # Asserts that different trials (1000 samples per trial) are unlikely
  # to see the same sequence of values. Will catch buggy
  # implementations which use the same random number seed.
  def testDistinct(self):
    for dt in dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64:
      maxv = 1.0 if dt.is_floating else 1 << 30
      sampler = self._Sampler(1000, minv=0, maxv=maxv, dtype=dt, use_gpu=True)
      x = sampler()
      y = sampler()
      # Number of positions at which the two unseeded draws coincide;
      # float16 has fewer representable values, so allow a looser bound.
      count = (x == y).sum()
      count_limit = 50 if dt == dtypes.float16 else 10
      if count >= count_limit:
        print("x = ", x)
        print("y = ", y)
        print("count = ", count)
      self.assertTrue(count < count_limit)

  # Check that uniform ints actually follow a uniform distribution.
  def testUniformInts(self):
    minv = -2
    maxv = 15
    n = 100000
    p = 1 / (maxv - minv)
    # The counts should follow an (n, p) binomial distribution.
    mean = p * n
    std = np.sqrt(n * p * (1 - p))
    for dt in dtypes.int32, dtypes.int64:
      # Use a fixed seed here to make the test deterministic.
      # Without the fixed seed, the 5 * std bound will (very rarely) fail.
      sampler = self._Sampler(
          n // 10, minv=minv, maxv=maxv, dtype=dt, use_gpu=True, seed=17)
      x = sampler().ravel()
      self.assertEqual(x.shape, (n,))
      counts, _ = np.histogram(x, bins=maxv - minv)
      self.assertEqual(counts.shape, (maxv - minv,))
      self.assertEqual(counts.sum(), n)
      error = np.abs(counts - mean)
      self.assertLess(error.max(), 5 * std)

  # Checks that the CPU and GPU implementation returns the same results,
  # given the same random seed
  def testCPUGPUMatch(self):
    for dt in dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64:
      maxv = 1.0 if dt.is_floating else 17
      results = {}
      for use_gpu in False, True:
        sampler = self._Sampler(
            1000, minv=0, maxv=maxv, dtype=dt, use_gpu=use_gpu, seed=12345)
        results[use_gpu] = sampler()
      self.assertAllEqual(results[False], results[True])

  def testSeed(self):
    # Seeds outside the int32 range (2**100, -2**100) must be accepted
    # and still be reproducible.
    for dt in dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64:
      for seed in [345, 2**100, -2**100]:
        sx = self._Sampler(1000, 0, 17, dtype=dt, use_gpu=True, seed=seed)
        sy = self._Sampler(1000, 0, 17, dtype=dt, use_gpu=True, seed=seed)
        self.assertAllEqual(sx(), sy())

  def testNoCSE(self):
    # Two identical random ops must not be merged by common-subexpression
    # elimination.
    shape = [2, 3, 4]
    for dtype in dtypes.float16, dtypes.float32, dtypes.int32:
      with self.test_session(use_gpu=True):
        rnd1 = random_ops.random_uniform(shape, 0, 17, dtype=dtype)
        rnd2 = random_ops.random_uniform(shape, 0, 17, dtype=dtype)
        diff = (rnd2 - rnd1).eval()
        self.assertTrue(np.linalg.norm(diff) > 0.1)
class RandomShapeTest(test.TestCase):
  """Static shape inference for the random-op constructors."""

  def _check_shape_inference(self, sample_op):
    # Shared assertions: fully known, partially known (rank only), and
    # fully unknown shape arguments.
    # Fully known shape.
    known = sample_op([1, 2, 3])
    self.assertEqual([1, 2, 3], known.get_shape())
    # Partially known shape: a rank-1 shape tensor of length 3 fixes the
    # output rank but not the dimensions.
    partial = sample_op(array_ops.placeholder(dtypes.int32, shape=(3,)))
    self.assertEqual([None, None, None], partial.get_shape().as_list())
    # Unknown shape.
    unknown = sample_op(array_ops.placeholder(dtypes.int32))
    self.assertIs(None, unknown.get_shape().ndims)

  def testTruncatedNormal(self):
    self._check_shape_inference(random_ops.truncated_normal)

  def testRandomNormal(self):
    self._check_shape_inference(random_ops.random_normal)

  def testRandomUniform(self):
    self._check_shape_inference(random_ops.random_uniform)
if __name__ == "__main__":
  # Run every test case defined in this module.
  test.main()
|
openhdf/enigma2-wetek | refs/heads/master | lib/python/Components/Converter/ServiceOrbitalPosition.py | 102 | # -*- coding: utf-8 -*-
from Components.Converter.Converter import Converter
from enigma import iServiceInformation, iPlayableService, iPlayableServicePtr, eServiceCenter
from ServiceReference import resolveAlternate
from Components.Element import cached
class ServiceOrbitalPosition(Converter, object):
	"""Converter rendering the orbital position of the current DVB-S
	transponder (e.g. "19.2° E"), the tuner type for other transponder
	kinds, or "Stream"/"Alternative" labels for special references."""
	FULL = 0
	SHORT = 1

	def __init__(self, type):
		Converter.__init__(self, type)
		# "Short" format omits the degree sign and the space before E/W.
		if type == "Short":
			self.type = self.SHORT
		else:
			self.type = self.FULL

	@cached
	def getText(self):
		service = self.source.service
		if isinstance(service, iPlayableServicePtr):
			# A live playing service: ask its own info object.
			info = service and service.info()
			ref = None
		else: # reference
			info = service and self.source.info
			ref = service
		if not info:
			return ""
		if ref:
			# Resolve alternative-service groups to the active member
			# before reading transponder data.
			nref = resolveAlternate(ref)
			if nref:
				ref = nref
				info = eServiceCenter.getInstance().info(ref)
			transponder_info = info.getInfoObject(ref, iServiceInformation.sTransponderData)
		else:
			transponder_info = info.getInfoObject(iServiceInformation.sTransponderData)
		if transponder_info:
			tunerType = transponder_info["tuner_type"]
			if tunerType == "DVB-S":
				# Position arrives in tenths of a degree measured eastward
				# (0..3599); values beyond 180.0°E are shown as west.
				pos = int(transponder_info["orbital_position"])
				direction = 'E'
				if pos > 1800:
					pos = 3600 - pos
					direction = 'W'
				# NOTE(review): pos/10 relies on Python 2 integer division.
				if self.type == self.SHORT:
					return "%d.%d%s" % (pos/10, pos%10, direction)
				else:
					return "%d.%d° %s" % (pos/10, pos%10, direction)
			# Non-satellite transponders: show the tuner type itself.
			return tunerType
		if ref:
			refString = ref.toString().lower()
			# "%3a//" (url-encoded "://") marks a streamed service.
			if "%3a//" in refString:
				return _("Stream")
			if refString.startswith("1:134:"):
				return _("Alternative")
		return ""

	text = property(getText)

	def changed(self, what):
		# Re-render on non-specific changes and on service start events.
		if what[0] != self.CHANGED_SPECIFIC or what[1] in [iPlayableService.evStart]:
			Converter.changed(self, what)
|
stephenpascoe/esgf-pyclient | refs/heads/master | test/test_opendap.py | 2 | """
test for secure opendap support
"""
import tempfile
import os
import os.path as op
import shutil
from unittest import TestCase
import re
from pyesgf.logon import LogonManager, DAP_CONFIG_MARKER
from pyesgf.search import SearchConnection
from test.config import TEST_SERVICE
class TestOpendap(TestCase):
    """Tests for LogonManager's maintenance of the DAP (.dodsrc) config
    file and for authenticated OPeNDAP access (Python 2 code)."""

    def setUp(self):
        # Work inside a throwaway ESGF home directory and chdir into it:
        # the NetCDF DAP layer reads its configuration from the CWD.
        self.esgf_dir = tempfile.mkdtemp(prefix='pyesgf_tmp')
        self.dap_config = op.join(self.esgf_dir, '.dodsrc')
        # NetCDF DAP support looks in CWD for configuration
        self.orig_dir = os.getcwd()
        os.chdir(self.esgf_dir)

    def tearDown(self):
        os.chdir(self.orig_dir)
        if op.exists(self.esgf_dir):
            shutil.rmtree(self.esgf_dir)

    def init_config(self, config):
        # Seed the .dodsrc file with the given contents.
        with open(self.dap_config, 'w') as fh:
            fh.write(config)

    def read_config(self):
        # Return the current .dodsrc contents.
        # NOTE(review): the file handle is never explicitly closed.
        return open(self.dap_config).read()

    def check_preamble(self, preamble, config):
        # Match iff `config` begins with `preamble` immediately followed
        # by the managed BEGIN marker line.
        return re.match((preamble +
                         r'\s*^# BEGIN {0}'.format(DAP_CONFIG_MARKER)),
                        config, re.M | re.S)

    def check_postamble(self, postamble, config):
        # Match iff the managed END marker line is followed by `postamble`.
        return re.search(r'^# END {0}$\s*{1}'.format(DAP_CONFIG_MARKER, postamble),
                         config, re.M | re.S)

    def test_config1(self):
        # Create the config file from scratch
        lm = LogonManager(self.esgf_dir, dap_config=self.dap_config)
        config = self.read_config()
        print config
        assert re.match(r'\s*^# BEGIN {0}$.*^# END {0}$'.format(DAP_CONFIG_MARKER),
                        config, re.M | re.S)

    def test_config2(self):
        # Create the config when one already exists. Check it is retained.
        lines = ['# Welcome to my config file', 'SOME_OPT=foo', '']
        preamble = '\n'.join(lines)
        self.init_config(preamble)
        lm = LogonManager(self.esgf_dir, dap_config=self.dap_config)
        config = self.read_config()
        print config
        assert self.check_preamble(preamble, config)

    def test_config3(self):
        # Create the config when one already exists with the BEGIN section in it
        lines = ['# Welcome to my config file', 'SOME_OPT=foo', '']
        preamble = '\n'.join(lines)
        lines = ['', '# Some more config here', 'OTHER_OPT=bar', '']
        postamble = '\n'.join(lines)
        config = '''\
{0}
# BEGIN <<< Managed by esgf-pyclient >>>
CURL.VERBOSE=0
CURL.COOKIEJAR=/tmp/foo/certificates/.dods_cookies
CURL.SSL.VALIDATE=1
CURL.SSL.CERTIFICATE=/tmp/foo/certificates/credentials.pem
CURL.SSL.KEY=/tmp/foo/certificates/credentials.pem
CURL.SSL.CAPATH=/tmp/foo/certificates/certificates
# END <<< Managed by esgf-pyclient >>>
{1}
'''.format(preamble, postamble)
        self.init_config(config)
        lm = LogonManager(self.esgf_dir, dap_config=self.dap_config)
        config1 = self.read_config()
        print config1
        # Both the text before and after the managed section must survive.
        assert self.check_preamble(preamble, config1)
        assert self.check_postamble(postamble, config1)

    def test_open_url(self):
        # End-to-end: locate a CMIP5 file via the search API and open its
        # OPeNDAP endpoint (needs network access and valid credentials).
        import netCDF4
        lm = LogonManager(self.esgf_dir, dap_config=self.dap_config)
        print 'Using dap_config at %s' % self.dap_config
        conn = SearchConnection(TEST_SERVICE, distrib=False)
        #!TODO: replace with request for specific dataset
        ctx = conn.new_context(project='CMIP5')
        results = ctx.search()
        r1 = results[0]
        f_ctx = r1.file_context()
        file_results = f_ctx.search()
        opendap_url = file_results[0].opendap_url
        print 'OPeNDAP URL is %s' % opendap_url
        ds = netCDF4.Dataset(opendap_url)
        print ds.variables.keys()
    # Exclude the network-dependent test from automatic collection.
    test_open_url.__test__ = False
#!TODO: more corner cases to test for in DAP_CONFIG
|
halwai/cnn-rnn-siamese-video-similarity | refs/heads/master | finetune.py | 1 | #! /usr/bin/env python
import tensorflow as tf
import numpy as np
import re
import os
import time
import datetime
import gc
from helper import InputHelper, save_plot, compute_distance
from siamese_network import SiameseLSTM
import gzip
from random import random
from amos import Conv
# Parameters
# ==================================================
# Model/dataset hyper-parameters for fine-tuning the conv + siamese LSTM.
tf.flags.DEFINE_integer("embedding_dim", 1000, "Dimensionality of character embedding (default: 300)")
tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularizaion lambda (default: 0.0)")
tf.flags.DEFINE_string("training_file_path", "/home/tushar/abhijeet/gta/final/", "training folder (default: /home/halwai/gta_data/final)")
tf.flags.DEFINE_integer("max_frames", 20, "Maximum Number of frame (default: 20)")
tf.flags.DEFINE_string("name", "result", "prefix names of the output files(default: result)")
# Training parameters
tf.flags.DEFINE_integer("batch_size", 4, "Batch Size (default: 10)")
tf.flags.DEFINE_integer("num_epochs", 10, "Number of training epochs (default: 200)")
tf.flags.DEFINE_integer("checkpoint_every", 1, "Save model after this many epochs (default: 100)")
tf.flags.DEFINE_integer("num_lstm_layers", 1, "Number of LSTM layers(default: 1)")
tf.flags.DEFINE_integer("hidden_dim", 50, "Number of LSTM layers(default: 2)")
tf.flags.DEFINE_string("loss", "contrastive", "Type of Loss functions:: contrastive/AAAI(default: contrastive)")
tf.flags.DEFINE_boolean("projection", False, "Project Conv Layers Output to a Lower Dimensional Embedding (Default: True)")
tf.flags.DEFINE_boolean("conv_net_training", False, "Training ConvNet (Default: False)")
tf.flags.DEFINE_float("lr", 0.0000001, "learning-rate(default: 0.00001)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", False, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
tf.flags.DEFINE_integer("return_outputs", 1, "Outpust from LSTM, 0=>Last LSMT output, 2=> Cell-State Output. 1=> Hidden-State Output (default: 2)")
tf.flags.DEFINE_string("summaries_dir", "/home/tushar/abhijeet/gta/summaries/", "Summary storage")
#Conv Net Parameters
tf.flags.DEFINE_string("conv_layer", "pool6", "CNN features from AMOSNet(default: pool6)")
#Model-Restore Parameters
tf.flags.DEFINE_string("model", "/home/tushar/abhijeet/gta/runs/15a/checkpoints/model-10998", "Load trained model checkpoint (Default: None)")
tf.flags.DEFINE_string("train_file_positive", "./annotation_files/alderly_positives_train.txt", "Positive_training_file")
tf.flags.DEFINE_string("train_file_negative", "./annotation_files/alderly_negatives_train.txt", "Negative_training_file")
tf.flags.DEFINE_integer("train_val_ratio", 10, "learning-rate(default:10%)")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
# Echo all flag values to the log for reproducibility.
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
    print("{}={}".format(attr.upper(), value))
print("")
checkpoint_file = FLAGS.model
print(checkpoint_file)
if FLAGS.training_file_path==None:
    print("Input Files List is empty. use --training_file_path argument.")
    exit()
# Load train/validation splits; sum_no_of_batches is batches per epoch.
inpH = InputHelper()
train_set, dev_set, sum_no_of_batches = inpH.getDataSets(FLAGS.training_file_path, FLAGS.max_frames, FLAGS.train_val_ratio, FLAGS.train_file_positive, FLAGS.train_file_negative, FLAGS.batch_size)
# Training
# ==================================================
print("starting graph def")
graph = tf.Graph()
with graph.as_default():
    # Cap per-process GPU memory so other jobs can share the device.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
    session_conf = tf.ConfigProto(
        allow_soft_placement=FLAGS.allow_soft_placement,
        log_device_placement=FLAGS.log_device_placement,
        gpu_options=gpu_options,
    )
    sess = tf.Session(config=session_conf)
    print("started session")
    with sess.as_default():
        # Rebuild the pretrained graph from the checkpoint's MetaGraph and
        # look up its input/output tensors by name.
        saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
        global_step = tf.Variable(0, name="global_step", trainable=False)
        optimizer = tf.train.AdamOptimizer(FLAGS.lr, name="Adam_finetune")
        convModel_input_imgs = graph.get_operation_by_name("input_imgs").outputs[0]
        convModel_features = graph.get_operation_by_name("conv/output").outputs[0]
        siameseModel_input_x1 = graph.get_operation_by_name("input_x1").outputs[0]
        siameseModel_input_x2 = graph.get_operation_by_name("input_x2").outputs[0]
        siameseModel_input_y = graph.get_operation_by_name("input_y").outputs[0]
        siameseModel_video_lengths = graph.get_operation_by_name("video_lengths").outputs[0]
        siameseModel_out1 = graph.get_operation_by_name("output/concat").outputs[0]
        siameseModel_out2 = graph.get_operation_by_name("output/concat_1").outputs[0]
        siameseModel_distance = graph.get_operation_by_name("distance").outputs[0]
        siameseModel_loss = graph.get_operation_by_name("loss/div_1").outputs[0]
        dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
        print("loaded all variables")
        # Attach a fresh Adam fine-tuning step driving the restored loss.
        grads_and_vars=optimizer.compute_gradients(siameseModel_loss)
        tr_op_set = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
        timestamp = str(int(time.time()))
        out_dir = os.path.abspath(os.path.join("/home/tushar/abhijeet/gta/", "runs", FLAGS.name))
        print("Writing to {}\n".format(out_dir))
        # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
        checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
        checkpoint_prefix = os.path.join(checkpoint_dir, "model")
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        lstm_savepath="/home/tushar/abhijeet/gta/lstm_outputs"
        if not os.path.exists(lstm_savepath):
            os.makedirs(lstm_savepath)
        #saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)
        #print all trainable parameters
        tvar = tf.trainable_variables()
        for i, var in enumerate(tvar):
            print("{}".format(var.name))
        train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train', graph=tf.get_default_graph())
        val_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/val' , graph=tf.get_default_graph())
        graph_def = tf.get_default_graph().as_graph_def()
        graphpb_txt = str(graph_def)
        with open(os.path.join(checkpoint_dir, "graphpb.txt"), 'w') as f:
            f.write(graphpb_txt)
        # Initialize all variables first (covers the new optimizer slots),
        # then overwrite the pretrained ones from the checkpoint.
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, checkpoint_file)
        print("Restored Model")
        def train_step(x1_batch, x2_batch, y_batch, video_lengths):
            #A single training step
            # First run the conv net to turn each raw frame batch into
            # per-frame features, then feed both feature sequences plus
            # labels to the siamese LSTM with the train op.
            [x1_batch] = sess.run([convModel_features], feed_dict={convModel_input_imgs: x1_batch})
            [x2_batch] = sess.run([convModel_features], feed_dict={convModel_input_imgs: x2_batch})
            feed_dict = {
                siameseModel_input_x1: x1_batch,
                siameseModel_input_x2: x2_batch,
                siameseModel_input_y: y_batch,
                dropout_keep_prob: FLAGS.dropout_keep_prob,
                siameseModel_video_lengths: video_lengths,
            }
            out1, out2, _, step, loss, dist = sess.run([siameseModel_out1, siameseModel_out2, tr_op_set, global_step, siameseModel_loss, siameseModel_distance], feed_dict)
            time_str = datetime.datetime.now().isoformat()
            # Turn raw distances into 0/1 predictions for the chosen loss.
            d=compute_distance(dist, FLAGS.loss)
            correct = y_batch==d
            #print(out1, out2)
            #print(video_lengths)
            #print("TRAIN {}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, correct))
            print(y_batch, dist, d)
            # Returns (#correct predictions in batch, mean batch loss).
            return np.sum(correct), loss
        def dev_step(x1_batch, x2_batch, y_batch, video_lengths, dev_iter, epoch):
            #A single evaluation step (same as train_step but without the
            # optimizer update op).
            [x1_batch] = sess.run([convModel_features], feed_dict={convModel_input_imgs: x1_batch})
            [x2_batch] = sess.run([convModel_features], feed_dict={convModel_input_imgs: x2_batch})
            feed_dict = {
                siameseModel_input_x1: x1_batch,
                siameseModel_input_x2: x2_batch,
                siameseModel_input_y: y_batch,
                dropout_keep_prob: FLAGS.dropout_keep_prob,
                siameseModel_video_lengths: video_lengths,
            }
            step, loss, dist, out1, out2 = sess.run([global_step, siameseModel_loss, siameseModel_distance, siameseModel_out1,siameseModel_out2], feed_dict)
            time_str = datetime.datetime.now().isoformat()
            d=compute_distance(dist, FLAGS.loss)
            correct = y_batch==d
            #print("DEV {}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, correct))
            #print(y_batch, dist, d)
            # Returns (#correct, mean batch loss, per-example correctness).
            return np.sum(correct), loss, correct
        # Generate batches
        batches=inpH.batch_iter(
            train_set[0], train_set[1], train_set[2], train_set[3], FLAGS.batch_size, FLAGS.num_epochs, [[104, 114, 124], (227, 227)], shuffle=True, is_train=False)
        ptr=0
        max_validation_correct=0.0
        start_time = time.time()
        # Per-epoch (and per-batch) accuracy/loss histories for the plots.
        train_accuracy, val_accuracy = [] , []
        train_loss, val_loss = [], []
        train_batch_loss_arr, val_batch_loss_arr = [], []
        for nn in xrange(FLAGS.num_epochs):
            # Evaluate on validation data at the start of every epoch
            sum_val_correct=0.0
            val_epoch_loss=0.0
            val_results = []
            print("\nEvaluation:")
            dev_batches = inpH.batch_iter(dev_set[0],dev_set[1],dev_set[2],dev_set[3], FLAGS.batch_size, 1, [[104, 114, 124], (227, 227)], shuffle=False , is_train=False)
            dev_iter=0
            for (x1_dev_b,x2_dev_b,y_dev_b, dev_video_lengths) in dev_batches:
                if len(y_dev_b)<1:
                    continue
                dev_iter += 1
                batch_val_correct , val_batch_loss, batch_results = dev_step(x1_dev_b, x2_dev_b, y_dev_b, dev_video_lengths, dev_iter,nn)
                val_results = np.concatenate([val_results, batch_results])
                sum_val_correct = sum_val_correct + batch_val_correct
                current_step = tf.train.global_step(sess, global_step)
                #val_writer.add_summary(summary, current_step)
                # Weight the mean batch loss by batch size before summing.
                val_epoch_loss = val_epoch_loss + val_batch_loss*len(y_dev_b)
                val_batch_loss_arr.append(val_batch_loss*len(y_dev_b))
            print("val_loss ={}".format(val_epoch_loss/len(dev_set[2])))
            print("total_val_correct={}/total_val={}".format(sum_val_correct, len(dev_set[2])))
            val_accuracy.append(sum_val_correct*1.0/len(dev_set[2]))
            val_loss.append(val_epoch_loss/len(dev_set[2]))
            print("Epoch Number: {}".format(nn))
            epoch_start_time = time.time()
            sum_train_correct=0.0
            train_epoch_loss=0.0
            for kk in xrange(sum_no_of_batches):
                x1_batch, x2_batch, y_batch, video_lengths = batches.next()
                if len(y_batch)<1:
                    continue
                train_batch_correct, train_batch_loss =train_step(x1_batch, x2_batch, y_batch, video_lengths)
                #train_writer.add_summary(summary, current_step)
                sum_train_correct = sum_train_correct + train_batch_correct
                train_epoch_loss = train_epoch_loss + train_batch_loss* len(y_batch)
                train_batch_loss_arr.append(train_batch_loss*len(y_batch))
            print("train_loss ={}".format(train_epoch_loss/len(train_set[2])))
            print("total_train_correct={}/total_train={}".format(sum_train_correct, len(train_set[2])))
            train_accuracy.append(sum_train_correct*1.0/len(train_set[2]))
            train_loss.append(train_epoch_loss/len(train_set[2]))
            # Update stored model
            """if current_step % (FLAGS.checkpoint_every) == 0:
                if sum_val_correct >= max_validation_correct:
                    max_validation_correct = sum_val_correct
                    #saver.save(sess, checkpoint_prefix, global_step=current_step)
                    tf.train.write_graph(sess.graph.as_graph_def(), checkpoint_prefix, "graph"+str(nn)+".pb", as_text=False)
                    print("Saved model {} with checkpoint to {}".format(nn, checkpoint_prefix))"""
            epoch_end_time = time.time()
            print("Total time for {} th-epoch is {}\n".format(nn, epoch_end_time-epoch_start_time))
            # Refresh the accuracy/loss plots after every epoch.
            save_plot(train_accuracy, val_accuracy, 'epochs', 'accuracy', 'Accuracy vs epochs', [-0.1, nn+0.1, 0, 1.01], ['train','val' ],'./accuracy_'+str(FLAGS.name))
            save_plot(train_loss, val_loss, 'epochs', 'loss', 'Loss vs epochs', [-0.1, nn+0.1, 0, np.max(train_loss)+0.2], ['train','val' ],'./loss_'+str(FLAGS.name))
            save_plot(train_batch_loss_arr, val_batch_loss_arr, 'steps', 'loss', 'Loss vs steps', [-0.1, (nn+1)*sum_no_of_batches+0.1, 0, np.max(train_batch_loss_arr)+0.2], ['train','val' ],'./loss_batch_'+str(FLAGS.name))
        end_time = time.time()
        print("Total time for {} epochs is {}".format(FLAGS.num_epochs, end_time-start_time))
        #"""
arseny-tsyro/python_training | refs/heads/master | fixture/application.py | 1 | __author__ = 'Arseniy'
from selenium import webdriver
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.contact import ContactHelper
class Application:
    """Test fixture wrapping a Selenium WebDriver plus page-helper fixtures."""

    def __init__(self, browser, base_url):
        """Start the requested browser and attach helper fixtures.

        :param browser: one of 'firefox', 'chrome' or 'ie'.
        :param base_url: root URL of the application under test.
        :raises ValueError: if *browser* is not a recognized name.
        """
        if browser == 'firefox':
            self.wd = webdriver.Firefox()
        elif browser == 'chrome':
            self.wd = webdriver.Chrome()
        elif browser == 'ie':
            self.wd = webdriver.Ie()
        else:
            raise ValueError("Unrecognized browser %s" % browser)
        self.session = SessionHelper(self)
        self.group = GroupHelper(self)
        self.contact = ContactHelper(self)
        self.base_url = base_url

    def is_valid(self):
        """Return True if the browser session is still usable."""
        try:
            # Probing current_url raises if the session has died.
            self.wd.current_url
            return True
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are not silently swallowed; a dead session raises
            # a WebDriverException, which is an Exception subclass.
            return False

    def load_login_page(self):
        """Navigate the browser to the application's base URL."""
        wd = self.wd
        wd.get(self.base_url)

    def destroy(self):
        """Shut the browser down and release the WebDriver session."""
        self.wd.quit()
|
timblakely/bigbrain | refs/heads/master | bigbrain/bigbrain/oauth2/httplib2/socks.py | 91 | """SocksiPy - Python SOCKS module.
Version 1.00
Copyright 2006 Dan-Haim. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of Dan Haim nor the names of his contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE.
This module provides a standard socket-like interface for Python
for tunneling connections through SOCKS proxies.
"""
"""
Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
for use in PyLoris (http://pyloris.sourceforge.net/)
Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
mainly to merge bug fixes found in Sourceforge
"""
import base64
import socket
import struct
import sys
# Bail out at import time if the real socket implementation is missing --
# everything in this module wraps socket.socket.
if getattr(socket, 'socket', None) is None:
    raise ImportError('socket.socket missing, proxy support unusable')

# Proxy protocol identifiers accepted by setproxy()/setdefaultproxy().
PROXY_TYPE_SOCKS4 = 1
PROXY_TYPE_SOCKS5 = 2
PROXY_TYPE_HTTP = 3
PROXY_TYPE_HTTP_NO_TUNNEL = 4

# Module-wide default proxy settings (set via setdefaultproxy) and a
# reference to the original socket class kept before any wrapping.
_defaultproxy = None
_orgsocket = socket.socket
class ProxyError(Exception):
    """Base class for all proxy-related failures raised by this module."""

class GeneralProxyError(ProxyError):
    """Generic failure not specific to one proxy protocol."""

class Socks5AuthError(ProxyError):
    """SOCKS5 authentication sub-negotiation failed."""

class Socks5Error(ProxyError):
    """SOCKS5 protocol-level error."""

class Socks4Error(ProxyError):
    """SOCKS4 protocol-level error."""

class HTTPError(ProxyError):
    """HTTP proxy error."""
_generalerrors = ("success",
"invalid data",
"not connected",
"not available",
"bad proxy type",
"bad input")
_socks5errors = ("succeeded",
"general SOCKS server failure",
"connection not allowed by ruleset",
"Network unreachable",
"Host unreachable",
"Connection refused",
"TTL expired",
"Command not supported",
"Address type not supported",
"Unknown error")
_socks5autherrors = ("succeeded",
"authentication is required",
"all offered authentication methods were rejected",
"unknown username or invalid password",
"unknown error")
_socks4errors = ("request granted",
"request rejected or failed",
"request rejected because SOCKS server cannot connect to identd on the client",
"request rejected because the client program and identd report different user-ids",
"unknown error")
def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
    """Set the module-wide default proxy.

    Every socksocket created afterwards starts out with these settings,
    unless setproxy() is called on it explicitly.
    """
    global _defaultproxy
    settings = (proxytype, addr, port, rdns, username, password)
    _defaultproxy = settings
def wrapmodule(module):
    """wrapmodule(module)
    Attempts to replace a module's socket library with a SOCKS socket. Must set
    a default proxy using setdefaultproxy(...) first.
    This will only work on modules that import socket directly into the namespace;
    most of the Python Standard Library falls into this category.
    """
    # Guard clause: refuse to monkey-patch when no default proxy has been
    # configured, otherwise the wrapped sockets would connect directly anyway.
    # (Identity comparison with None is the idiomatic form of the original
    # `!= None` check.)
    if _defaultproxy is None:
        raise GeneralProxyError((4, "no proxy specified"))
    module.socket.socket = socksocket
class socksocket(socket.socket):
    """socksocket([family[, type[, proto]]]) -> socket object
    Open a SOCKS enabled socket. The parameters are the same as
    those of the standard socket init. In order for SOCKS to work,
    you must specify family=AF_INET, type=SOCK_STREAM and proto=0.
    """
    def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None):
        _orgsocket.__init__(self, family, type, proto, _sock)
        # Proxy settings tuple layout (also used by setproxy):
        # (proxytype, addr, port, rdns, username, password)
        if _defaultproxy != None:
            self.__proxy = _defaultproxy
        else:
            self.__proxy = (None, None, None, None, None, None)
        self.__proxysockname = None
        self.__proxypeername = None
        # True while requests are tunneled; connect() flips this to False for
        # PROXY_TYPE_HTTP_NO_TUNNEL on non-443 ports so sendall() rewrites
        # the request line instead of relying on CONNECT.
        self.__httptunnel = True
    def __recvall(self, count):
        """__recvall(count) -> data
        Receive EXACTLY the number of bytes requested from the socket.
        Blocks until the required number of bytes have been received.
        """
        data = self.recv(count)
        while len(data) < count:
            d = self.recv(count-len(data))
            # An empty read means the peer closed the connection mid-message.
            if not d: raise GeneralProxyError((0, "connection closed unexpectedly"))
            data = data + d
        return data
    def sendall(self, content, *args):
        """ override socket.socket.sendall method to rewrite the header
        for non-tunneling proxies if needed
        """
        if not self.__httptunnel:
            content = self.__rewriteproxy(content)
        return super(socksocket, self).sendall(content, *args)
    def __rewriteproxy(self, header):
        """ rewrite HTTP request headers to support non-tunneling proxies
        (i.e. those which do not support the CONNECT method).
        This only works for HTTP (not HTTPS) since HTTPS requires tunneling.
        """
        host, endpt = None, None
        hdrs = header.split("\r\n")
        # Find the Host: header and the GET/POST request line.
        for hdr in hdrs:
            if hdr.lower().startswith("host:"):
                host = hdr
            elif hdr.lower().startswith("get") or hdr.lower().startswith("post"):
                endpt = hdr
        if host and endpt:
            # Rebuild the request line with an absolute URI, as required
            # when talking to a plain (non-CONNECT) HTTP proxy.
            hdrs.remove(host)
            hdrs.remove(endpt)
            host = host.split(" ")[1]
            endpt = endpt.split(" ")
            if (self.__proxy[4] != None and self.__proxy[5] != None):
                hdrs.insert(0, self.__getauthheader())
            hdrs.insert(0, "Host: %s" % host)
            hdrs.insert(0, "%s http://%s%s %s" % (endpt[0], host, endpt[1], endpt[2]))
        return "\r\n".join(hdrs)
    def __getauthheader(self):
        # Basic proxy authentication from the stored username/password.
        # NOTE(review): on Python 3 base64.b64encode requires bytes, so this
        # would need auth.encode(); works as-is only on Python 2 — confirm
        # the targeted interpreter.
        auth = self.__proxy[4] + ":" + self.__proxy[5]
        return "Proxy-Authorization: Basic " + base64.b64encode(auth)
    def setproxy(self, proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
        """setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
        Sets the proxy to be used.
        proxytype - The type of the proxy to be used. Three types
        are supported: PROXY_TYPE_SOCKS4 (including socks4a),
        PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
        addr - The address of the server (IP or DNS).
        port - The port of the server. Defaults to 1080 for SOCKS
        servers and 8080 for HTTP proxy servers.
        rdns - Should DNS queries be performed on the remote side
        (rather than the local side). The default is True.
        Note: This has no effect with SOCKS4 servers.
        username - Username to authenticate with to the server.
        The default is no authentication.
        password - Password to authenticate with to the server.
        Only relevant when username is also provided.
        """
        self.__proxy = (proxytype, addr, port, rdns, username, password)
    def __negotiatesocks5(self, destaddr, destport):
        """__negotiatesocks5(self,destaddr,destport)
        Negotiates a connection through a SOCKS5 server.
        """
        # First we'll send the authentication packages we support.
        if (self.__proxy[4]!=None) and (self.__proxy[5]!=None):
            # The username/password details were supplied to the
            # setproxy method so we support the USERNAME/PASSWORD
            # authentication (in addition to the standard none).
            self.sendall(struct.pack('BBBB', 0x05, 0x02, 0x00, 0x02))
        else:
            # No username/password were entered, therefore we
            # only support connections with no authentication.
            self.sendall(struct.pack('BBB', 0x05, 0x01, 0x00))
        # We'll receive the server's response to determine which
        # method was selected
        chosenauth = self.__recvall(2)
        if chosenauth[0:1] != chr(0x05).encode():
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        # Check the chosen authentication method
        if chosenauth[1:2] == chr(0x00).encode():
            # No authentication is required
            pass
        elif chosenauth[1:2] == chr(0x02).encode():
            # Okay, we need to perform a basic username/password
            # authentication.
            # NOTE(review): the two chr(len(...)) calls are not .encode()d
            # while their neighbours are; on Python 3 this raises TypeError
            # (can't concat bytes to str). Works only on Python 2.
            self.sendall(chr(0x01).encode() + chr(len(self.__proxy[4])) + self.__proxy[4] + chr(len(self.__proxy[5])) + self.__proxy[5])
            authstat = self.__recvall(2)
            if authstat[0:1] != chr(0x01).encode():
                # Bad response
                self.close()
                raise GeneralProxyError((1, _generalerrors[1]))
            if authstat[1:2] != chr(0x00).encode():
                # Authentication failed
                self.close()
                raise Socks5AuthError((3, _socks5autherrors[3]))
            # Authentication succeeded
        else:
            # Reaching here is always bad
            self.close()
            # NOTE(review): on Python 3, chosenauth[1] is an int, so the
            # comparison with bytes is always False and the generic error is
            # raised instead of Socks5AuthError; chosenauth[1:2] was likely
            # intended (as used everywhere else in this method).
            if chosenauth[1] == chr(0xFF).encode():
                raise Socks5AuthError((2, _socks5autherrors[2]))
            else:
                raise GeneralProxyError((1, _generalerrors[1]))
        # Now we can request the actual connection
        req = struct.pack('BBB', 0x05, 0x01, 0x00)
        # If the given destination address is an IP address, we'll
        # use the IPv4 address request even if remote resolving was specified.
        try:
            ipaddr = socket.inet_aton(destaddr)
            req = req + chr(0x01).encode() + ipaddr
        except socket.error:
            # Well it's not an IP number, so it's probably a DNS name.
            if self.__proxy[3]:
                # Resolve remotely
                ipaddr = None
                req = req + chr(0x03).encode() + chr(len(destaddr)).encode() + destaddr
            else:
                # Resolve locally
                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
                req = req + chr(0x01).encode() + ipaddr
        req = req + struct.pack(">H", destport)
        self.sendall(req)
        # Get the response
        resp = self.__recvall(4)
        if resp[0:1] != chr(0x05).encode():
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        elif resp[1:2] != chr(0x00).encode():
            # Connection failed
            self.close()
            if ord(resp[1:2])<=8:
                raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])]))
            else:
                raise Socks5Error((9, _socks5errors[9]))
        # Get the bound address/port
        elif resp[3:4] == chr(0x01).encode():
            boundaddr = self.__recvall(4)
        elif resp[3:4] == chr(0x03).encode():
            # Domain-name address type: next byte is the name length.
            resp = resp + self.recv(1)
            boundaddr = self.__recvall(ord(resp[4:5]))
        else:
            self.close()
            raise GeneralProxyError((1,_generalerrors[1]))
        boundport = struct.unpack(">H", self.__recvall(2))[0]
        self.__proxysockname = (boundaddr, boundport)
        if ipaddr != None:
            self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
        else:
            self.__proxypeername = (destaddr, destport)
    def getproxysockname(self):
        """getsockname() -> address info
        Returns the bound IP address and port number at the proxy.
        """
        return self.__proxysockname
    def getproxypeername(self):
        """getproxypeername() -> address info
        Returns the IP and port number of the proxy.
        """
        return _orgsocket.getpeername(self)
    def getpeername(self):
        """getpeername() -> address info
        Returns the IP address and port number of the destination
        machine (note: getproxypeername returns the proxy)
        """
        return self.__proxypeername
    def __negotiatesocks4(self,destaddr,destport):
        """__negotiatesocks4(self,destaddr,destport)
        Negotiates a connection through a SOCKS4 server.
        """
        # Check if the destination address provided is an IP address
        rmtrslv = False
        try:
            ipaddr = socket.inet_aton(destaddr)
        except socket.error:
            # It's a DNS name. Check where it should be resolved.
            if self.__proxy[3]:
                # SOCKS4A: send the invalid IP 0.0.0.1 plus the hostname.
                ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)
                rmtrslv = True
            else:
                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
        # Construct the request packet
        req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr
        # The username parameter is considered userid for SOCKS4
        if self.__proxy[4] != None:
            req = req + self.__proxy[4]
        req = req + chr(0x00).encode()
        # DNS name if remote resolving is required
        # NOTE: This is actually an extension to the SOCKS4 protocol
        # called SOCKS4A and may not be supported in all cases.
        if rmtrslv:
            req = req + destaddr + chr(0x00).encode()
        self.sendall(req)
        # Get the response from the server
        resp = self.__recvall(8)
        if resp[0:1] != chr(0x00).encode():
            # Bad data
            self.close()
            raise GeneralProxyError((1,_generalerrors[1]))
        if resp[1:2] != chr(0x5A).encode():
            # Server returned an error
            self.close()
            if ord(resp[1:2]) in (91, 92, 93):
                self.close()
                raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90]))
            else:
                raise Socks4Error((94, _socks4errors[4]))
        # Get the bound address/port
        self.__proxysockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
        # NOTE(review): rmtrslv is always a bool here, so `!= None` is always
        # True and the destaddr branch below is dead; `if rmtrslv:` was
        # presumably intended — confirm against upstream SocksiPy fixes.
        if rmtrslv != None:
            self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
        else:
            self.__proxypeername = (destaddr, destport)
    def __negotiatehttp(self, destaddr, destport):
        """__negotiatehttp(self,destaddr,destport)
        Negotiates a connection through an HTTP server.
        """
        # If we need to resolve locally, we do this now
        if not self.__proxy[3]:
            addr = socket.gethostbyname(destaddr)
        else:
            addr = destaddr
        headers = ["CONNECT ", addr, ":", str(destport), " HTTP/1.1\r\n"]
        headers += ["Host: ", destaddr, "\r\n"]
        if (self.__proxy[4] != None and self.__proxy[5] != None):
            headers += [self.__getauthheader(), "\r\n"]
        headers.append("\r\n")
        self.sendall("".join(headers).encode())
        # We read the response until we get the string "\r\n\r\n"
        resp = self.recv(1)
        while resp.find("\r\n\r\n".encode()) == -1:
            resp = resp + self.recv(1)
        # We just need the first line to check if the connection
        # was successful
        statusline = resp.splitlines()[0].split(" ".encode(), 2)
        if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()):
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        try:
            statuscode = int(statusline[1])
        except ValueError:
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        if statuscode != 200:
            self.close()
            raise HTTPError((statuscode, statusline[2]))
        self.__proxysockname = ("0.0.0.0", 0)
        self.__proxypeername = (addr, destport)
    def connect(self, destpair):
        """connect(self, destpair)
        Connects to the specified destination through a proxy.
        destpair - A tuple of the IP/DNS address and the port number.
        (identical to socket's connect).
        To select the proxy server use setproxy().
        """
        # Do a minimal input check first
        if (not type(destpair) in (list,tuple)) or (len(destpair) < 2) or (type(destpair[0]) != type('')) or (type(destpair[1]) != int):
            raise GeneralProxyError((5, _generalerrors[5]))
        # Dispatch on the configured proxy type; default ports are 1080 for
        # SOCKS and 8080 for HTTP proxies.
        if self.__proxy[0] == PROXY_TYPE_SOCKS5:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 1080
            _orgsocket.connect(self, (self.__proxy[1], portnum))
            self.__negotiatesocks5(destpair[0], destpair[1])
        elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 1080
            _orgsocket.connect(self,(self.__proxy[1], portnum))
            self.__negotiatesocks4(destpair[0], destpair[1])
        elif self.__proxy[0] == PROXY_TYPE_HTTP:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 8080
            _orgsocket.connect(self,(self.__proxy[1], portnum))
            self.__negotiatehttp(destpair[0], destpair[1])
        elif self.__proxy[0] == PROXY_TYPE_HTTP_NO_TUNNEL:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 8080
            _orgsocket.connect(self,(self.__proxy[1],portnum))
            # HTTPS still needs a tunnel even through a "no tunnel" proxy.
            if destpair[1] == 443:
                self.__negotiatehttp(destpair[0],destpair[1])
            else:
                self.__httptunnel = False
        elif self.__proxy[0] == None:
            # No proxy configured: plain direct connection.
            _orgsocket.connect(self, (destpair[0], destpair[1]))
        else:
            raise GeneralProxyError((4, _generalerrors[4]))
|
mats116/ElasticBigQuery | refs/heads/master | dmp/external/pyasn1/compat/octets.py | 96 | from sys import version_info
# Byte-string compatibility shims: octet strings are `str` on Python 2 and
# `bytes` on Python 3; these helpers give callers one uniform API.
if version_info[0] <= 2:
    int2oct = chr
    ints2octs = lambda s: ''.join([ int2oct(x) for x in s ])
    null = ''
    oct2int = ord
    octs2ints = lambda s: [ oct2int(x) for x in s ]
    str2octs = lambda x: x
    octs2str = lambda x: x
    isOctetsType = lambda s: isinstance(s, str)
    isStringType = lambda s: isinstance(s, (str, unicode))
else:
    # On Python 3 indexing bytes already yields ints, so several of these
    # become identity functions.
    ints2octs = bytes
    int2oct = lambda x: ints2octs((x,))
    null = ints2octs()
    oct2int = lambda x: x
    octs2ints = lambda s: [ x for x in s ]
    str2octs = lambda x: x.encode()
    octs2str = lambda x: x.decode()
    isOctetsType = lambda s: isinstance(s, bytes)
    isStringType = lambda s: isinstance(s, str)
|
bugfree-software/the-internet-solution-python | refs/heads/master | tests/test_windows.py | 1 | from . import TheInternetTestCase
from helium.api import find_all, Window, click, Text, switch_to
class WindowsTest(TheInternetTestCase):
    def get_page(self):
        """Page under test: the multi-window demo."""
        return "http://the-internet.herokuapp.com/windows"

    def test_open_new_window(self):
        """Clicking the link opens a second window; switching back works."""
        # Exactly one window exists before the click.
        initial_windows = find_all(Window())
        self.assertEqual(len(initial_windows), 1)
        self.assertTrue(Text("Opening a new window").exists())
        original_window = initial_windows[0]
        click("Click Here")
        # The click spawned a second window showing the new page.
        self.assertEqual(len(find_all(Window())), 2)
        self.assertTrue(Text("New Window").exists())
        # Switching back to the first window restores the original page.
        switch_to(original_window)
        self.assertTrue(Text("Opening a new window").exists())
wemanuel/smry | refs/heads/master | Crypto/SelfTest/Hash/test_SHA512.py | 116 | # -*- coding: utf-8 -*-
#
# SelfTest/Hash/test_SHA512.py: Self-test for the SHA-512 hash function
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Hash.SHA512"""
__revision__ = "$Id$"
# Test vectors from various sources
# This is a list of (expected_result, input[, description]) tuples.
test_data = [
# RFC 4634: Section Page 8.4, "Test 1"
('ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f', 'abc'),
# RFC 4634: Section Page 8.4, "Test 2.1"
('8e959b75dae313da8cf4f72814fc143f8f7779c6eb9f7fa17299aeadb6889018501d289e4900f7e4331b99dec4b5433ac7d329eeb6dd26545e96e55b874be909', 'abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu'),
# RFC 4634: Section Page 8.4, "Test 3"
('e718483d0ce769644e2e42c7bc15b4638e1f98b13b2044285632a803afa973ebde0ff244877ea60a4cb0432ce577c31beb009c5c2c49aa2e4eadb217ad8cc09b', 'a' * 10**6, "'a' * 10**6"),
# Taken from http://de.wikipedia.org/wiki/Secure_Hash_Algorithm
('cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e', ''),
('af9ed2de700433b803240a552b41b5a472a6ef3fe1431a722b2063c75e9f07451f67a28e37d09cde769424c96aea6f8971389db9e1993d6c565c3c71b855723c', 'Franz jagt im komplett verwahrlosten Taxi quer durch Bayern'),
]
def get_tests(config={}):
    """Build the SHA-512 hash test cases for the self-test framework."""
    from Crypto.Hash import SHA512
    from common import make_hash_tests
    # DER-encoded OID for SHA-512 (2.16.840.1.101.3.4.2.3).
    sha512_oid = "\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x03"
    return make_hash_tests(SHA512, "SHA512", test_data,
                           digest_size=64,
                           oid=sha512_oid)
# Allow running this self-test module directly with unittest.
if __name__ == '__main__':
    import unittest
    suite = lambda: unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
|
jkugler/ansible | refs/heads/devel | lib/ansible/plugins/lookup/etcd.py | 151 | # (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
try:
import json
except ImportError:
import simplejson as json
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.urls import open_url
# Base URL of the etcd server, overridable through the ANSIBLE_ETCD_URL
# environment variable (this can be made configurable, but should not use
# ansible.cfg). Single os.getenv call replaces the original redundant
# getenv-then-environ double lookup.
ANSIBLE_ETCD_URL = os.getenv('ANSIBLE_ETCD_URL', 'http://127.0.0.1:4001')
class Etcd:
    """Minimal read-only client for the etcd v1 key/value HTTP API."""

    def __init__(self, url=ANSIBLE_ETCD_URL, validate_certs=True):
        self.url = url
        self.baseurl = '%s/v1/keys' % (self.url)
        self.validate_certs = validate_certs

    def get(self, key):
        """Return the value stored under *key*.

        Returns an empty string when the server cannot be reached, and the
        marker string "ENOENT" when the server reports an error (e.g. the
        key does not exist).
        """
        url = "%s/%s" % (self.baseurl, key)
        value = ""
        try:
            r = open_url(url, validate_certs=self.validate_certs)
            data = r.read()
        except Exception:
            # Best-effort lookup: any transport failure yields "" rather
            # than aborting the play (narrowed from the original bare
            # `except:`, which also swallowed KeyboardInterrupt/SystemExit).
            return value
        # Response shape:
        # {"action":"get","key":"/name","value":"Jane Jolie","index":5}
        # The original wrapped this in `try: ... except: raise` with an
        # unreachable `pass`, which is equivalent to no handler at all, so
        # JSON decoding errors propagate to the caller here as before.
        item = json.loads(data)
        if 'value' in item:
            value = item['value']
        if 'errorCode' in item:
            value = "ENOENT"
        return value
class LookupModule(LookupBase):
    def run(self, terms, variables, **kwargs):
        """Look up each term's first whitespace-delimited token in etcd."""
        client = Etcd(validate_certs=kwargs.get('validate_certs', True))
        return [client.get(term.split()[0]) for term in terms]
|
karec/oct | refs/heads/master | tests/test_report.py | 2 | import os
import unittest
from oct.results.report import ReportResults
from oct.results.models import set_database, db
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
class ReportTest(unittest.TestCase):
    def test_empty_results(self):
        """A report built from an empty result database compiles cleanly."""
        fixture = os.path.join(BASE_DIR, 'fixtures', 'empty_results.sqlite')
        set_database(fixture, db, {})
        report = ReportResults(60, 10)
        report.compile_results()
|
mchristopher/PokemonGo-DesktopMap | refs/heads/master | app/pywin/Lib/lib2to3/fixes/fix_apply.py | 315 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for apply().
This converts apply(func, v, k) into (func)(*v, **k)."""
# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Call, Comma, parenthesize
class FixApply(fixer_base.BaseFix):
    # Transforms apply(func, v[, k]) into func(*v[, **k]).
    BM_compatible = True
    PATTERN = """
    power< 'apply'
        trailer<
            '('
            arglist<
                (not argument<NAME '=' any>) func=any ','
                (not argument<NAME '=' any>) args=any [','
                (not argument<NAME '=' any>) kwds=any] [',']
            >
            ')'
        >
    >
    """
    def transform(self, node, results):
        """Rewrite the matched apply(...) call node into a direct call."""
        syms = self.syms
        assert results
        func = results["func"]
        args = results["args"]
        kwds = results.get("kwds")
        # Remember the whitespace preceding the apply() call so the
        # replacement node keeps the original formatting.
        prefix = node.prefix
        func = func.clone()
        # Parenthesize anything that would not bind correctly as a call
        # target, e.g. lambdas or `a ** b` expressions.
        if (func.type not in (token.NAME, syms.atom) and
            (func.type != syms.power or
             func.children[-2].type == token.DOUBLESTAR)):
            # Need to parenthesize
            func = parenthesize(func)
        func.prefix = ""
        args = args.clone()
        args.prefix = ""
        if kwds is not None:
            kwds = kwds.clone()
            kwds.prefix = ""
        l_newargs = [pytree.Leaf(token.STAR, u"*"), args]
        if kwds is not None:
            l_newargs.extend([Comma(),
                              pytree.Leaf(token.DOUBLESTAR, u"**"),
                              kwds])
            l_newargs[-2].prefix = u" " # that's the ** token
        # XXX Sometimes we could be cleverer, e.g. apply(f, (x, y) + t)
        # can be translated into f(x, y, *t) instead of f(*(x, y) + t)
        #new = pytree.Node(syms.power, (func, ArgList(l_newargs)))
        return Call(func, l_newargs, prefix=prefix)
|
robhudson/zamboni | refs/heads/master | apps/access/helpers.py | 22 | import jinja2
from jingo import register
import acl
@register.function
@jinja2.contextfunction
def check_ownership(context, object, require_owner=False,
                    require_author=False, ignore_disabled=True):
    """Jinja helper: delegate ownership checks for *object* to acl,
    using the request taken from the template context.

    NOTE(review): the parameter name ``object`` shadows the builtin; kept
    as-is because templates may pass it by keyword.
    """
    return acl.check_ownership(context['request'], object,
                               require_owner=require_owner,
                               require_author=require_author,
                               ignore_disabled=ignore_disabled)
@register.function
@jinja2.contextfunction
def action_allowed(context, app, action):
    """Jinja helper: check whether the current request may perform
    *action* on *app*, delegating to acl."""
    return acl.action_allowed(context['request'], app, action)
|
opennode/waldur-mastermind | refs/heads/develop | src/waldur_mastermind/analytics/models.py | 2 | from django.contrib.contenttypes import fields as ct_fields
from django.contrib.contenttypes import models as ct_models
from django.db import models
from waldur_core.core.managers import GenericKeyMixin
class QuotaManager(GenericKeyMixin, models.Manager):
    def update_or_create_quota(self, scope, name, date, usage):
        """Upsert the daily usage record keyed by (scope, name, date)."""
        ctype = ct_models.ContentType.objects.get_for_model(scope)
        return self.update_or_create(
            content_type=ctype,
            object_id=scope.pk,
            name=name,
            date=date,
            defaults={'usage': usage},
        )
class DailyQuotaHistory(models.Model):
    """
    This model stores quota usage history per day.
    It is designed to store derived data optimized for dashboard charts.
    See also related design pattern:
    https://martinfowler.com/bliki/ReportingDatabase.html
    """
    # Generic foreign key to the object (project, customer, ...) whose
    # quota usage is being recorded.
    content_type = models.ForeignKey(on_delete=models.CASCADE, to=ct_models.ContentType)
    object_id = models.PositiveIntegerField()
    scope = ct_fields.GenericForeignKey('content_type', 'object_id')
    objects = QuotaManager()
    # Quota name, e.g. a resource counter; indexed for chart queries.
    name = models.CharField(max_length=150, db_index=True)
    usage = models.BigIntegerField()
    date = models.DateField()
    class Meta:
        # One row per scope/quota/day; QuotaManager upserts against this key.
        unique_together = ('content_type', 'object_id', 'name', 'date')
|
nricklin/PyGithub | refs/heads/master | github/Notification.py | 74 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Peter Golm <golm.peter@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2013 martinqt <m.ki2@laposte.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.Repository
import github.NotificationSubject
class Notification(github.GithubObject.CompletableGithubObject):
    """
    This class represents Notifications. The reference can be found here http://developer.github.com/v3/activity/notifications/
    """
    @property
    def id(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._id)
        return self._id.value
    @property
    def last_read_at(self):
        """
        :type: datetime.datetime
        """
        self._completeIfNotSet(self._last_read_at)
        return self._last_read_at.value
    @property
    def repository(self):
        """
        :type: :class:`github.Repository.Repository`
        """
        self._completeIfNotSet(self._repository)
        return self._repository.value
    @property
    def subject(self):
        """
        :type: :class:`github.NotificationSubject.NotificationSubject`
        """
        self._completeIfNotSet(self._subject)
        return self._subject.value
    @property
    def reason(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._reason)
        return self._reason.value
    @property
    def subscription_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._subscription_url)
        return self._subscription_url.value
    @property
    def unread(self):
        """
        :type: bool
        """
        self._completeIfNotSet(self._unread)
        return self._unread.value
    @property
    def updated_at(self):
        """
        :type: datetime.datetime
        """
        self._completeIfNotSet(self._updated_at)
        return self._updated_at.value
    @property
    def url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._url)
        return self._url.value
    def _initAttributes(self):
        self._id = github.GithubObject.NotSet
        self._last_read_at = github.GithubObject.NotSet
        self._repository = github.GithubObject.NotSet
        # Bug fix: _subject was never initialized here, so accessing the
        # `subject` property on a payload lacking a "subject" key raised
        # AttributeError instead of triggering lazy completion like every
        # other attribute.
        self._subject = github.GithubObject.NotSet
        self._reason = github.GithubObject.NotSet
        self._subscription_url = github.GithubObject.NotSet
        self._unread = github.GithubObject.NotSet
        self._updated_at = github.GithubObject.NotSet
        self._url = github.GithubObject.NotSet
    def _useAttributes(self, attributes):
        if "id" in attributes:  # pragma no branch
            self._id = self._makeStringAttribute(attributes["id"])
        if "last_read_at" in attributes:  # pragma no branch
            self._last_read_at = self._makeDatetimeAttribute(attributes["last_read_at"])
        if "repository" in attributes:  # pragma no branch
            self._repository = self._makeClassAttribute(github.Repository.Repository, attributes["repository"])
        if "subject" in attributes:  # pragma no branch
            self._subject = self._makeClassAttribute(github.NotificationSubject.NotificationSubject, attributes["subject"])
        if "reason" in attributes:  # pragma no branch
            self._reason = self._makeStringAttribute(attributes["reason"])
        if "subscription_url" in attributes:  # pragma no branch
            self._subscription_url = self._makeStringAttribute(attributes["subscription_url"])
        if "unread" in attributes:  # pragma no branch
            self._unread = self._makeBoolAttribute(attributes["unread"])
        if "updated_at" in attributes:  # pragma no branch
            self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
        if "url" in attributes:  # pragma no branch
            self._url = self._makeStringAttribute(attributes["url"])
|
bliti/pydweet | refs/heads/master | example/main.py | 1 | #!/usr/bin/env python
from dweet import Dweet
if __name__ == "__main__":
dweet = Dweet()
#dweet an dweet without a thing name. Returns a a thing name in the response
print dweet.dweet({"hello": "world"})
#dweet with a thing name
print dweet.dweet_by_name(name="test_thing", data={"hello": "world"})
#get the latest dweet by thing name. Only returns one dweet.
print dweet.latest_dweet(name="test_thing")
#get all the dweets by dweet name.
print dweet.all_dweets(name="test_thing") |
simone/django-gb | refs/heads/master | tests/view_tests/default_urls.py | 65 | from django.conf.urls import patterns, include, url
from django.contrib import admin
# Minimal URLconf used by the view tests: admin only.
urlpatterns = patterns('',
    # This is the same as in the default project template
    url(r'^admin/', include(admin.site.urls)),
)
|
mdworks2016/work_development | refs/heads/master | Python/20_Third_Certification/venv/lib/python3.7/site-packages/django/contrib/auth/validators.py | 55 | import re
from django.core import validators
from django.utils.deconstruct import deconstructible
from django.utils.translation import gettext_lazy as _
@deconstructible
class ASCIIUsernameValidator(validators.RegexValidator):
regex = r'^[\w.@+-]+\Z'
message = _(
'Enter a valid username. This value may contain only English letters, '
'numbers, and @/./+/-/_ characters.'
)
flags = re.ASCII
@deconstructible
class UnicodeUsernameValidator(validators.RegexValidator):
    # Same pattern as the ASCII validator but without re.ASCII, so \w
    # matches any Unicode word character.
    regex = r'^[\w.@+-]+\Z'
    message = _(
        'Enter a valid username. This value may contain only letters, '
        'numbers, and @/./+/-/_ characters.'
    )
    flags = 0
|
CoderDojoSG/todo | refs/heads/master | todo5/application.py | 2 | from flask import Flask
from flask import abort
from flask import make_response
from flask import request
from flask import session
from flask import render_template
from flask import redirect
from flask import url_for
from flask.ext.mongoengine import MongoEngine
import logging
from logging.handlers import RotatingFileHandler
app = Flask(__name__)
# SECURITY NOTE(review): live database credentials are committed in source;
# they should be moved to environment variables or an untracked config file.
app.config['MONGODB_SETTINGS'] = {
    'db': 'todo',
    'host': 'dogen.mongohq.com',
    'port': 10012,
    'username': 'tododbo',
    'password': 'everclear'
}
db = MongoEngine(app)
class User(db.Document):
    """A registered user and their todo items.

    NOTE(review): passwords are stored in plain text; they should be hashed.
    """
    username = db.StringField(required=True,max_length=64)
    password = db.StringField(required=True,max_length=64)
    items = db.ListField(db.StringField(max_length=256))
@app.route('/')
def index():
    """Render the signed-in user's todo list, or bounce to the login page."""
    app.logger.info('index')
    if 'username' not in session:
        return redirect(url_for('login'))
    current_user = get_user(session['username'])
    return render_template('index.html', user=current_user)
@app.route('/register', methods=['GET','POST'])
def register():
    """Show the registration form (GET) or create a new account (POST)."""
    app.logger.info('register')
    if request.method == 'POST':
        # NOTE(review): the session is established before the account is
        # validated, there is no duplicate-username check, and the password
        # is saved in plain text.
        session['username'] = request.form['username']
        User(username=request.form['username'],password=request.form['password']).save()
        # NOTE(review): this re-query fetches the user that was just saved,
        # so the error branch below appears unreachable in practice.
        user = User.objects(username = session['username']).first();
        if(user):
            return redirect(url_for('index'))
        else:
            return render_template('login.html', error='Invalid username or password')
    else:
        return render_template('register.html')
@app.route('/login', methods=['GET','POST'])
def login():
    """Show the login form (GET) or authenticate the user (POST)."""
    app.logger.info('login')
    if request.method != 'POST':
        return render_template('login.html')
    if validate_credentials(request.form['username'], request.form['password']):
        session['username'] = request.form['username']
        return redirect(url_for('index'))
    return render_template('login.html', error='Invalid username or password')
@app.route('/logout')
def logout():
    """Drop the session's username (if present) and return to the index."""
    app.logger.info('logout')
    session.pop('username', None)
    return redirect(url_for('index'))
@app.route('/add', methods=['POST'])
def add():
    """Append the posted item to the logged-in user's list; 403 otherwise."""
    app.logger.info('add')
    if 'username' in session:
        # Atomic push onto the user's items list.
        User.objects(username=session['username']).update_one(push__items=request.form['item'])
        return redirect(url_for('index'))
    else:
        abort(403)
@app.route('/delete/<item>')
def delete(item):
    """Remove *item* from the logged-in user's list; 403 otherwise."""
    app.logger.info('delete')
    if 'username' in session:
        # Atomic pull removes all occurrences of the item.
        User.objects(username=session['username']).update_one(pull__items=item)
        return redirect(url_for('index'))
    else:
        abort(403)
def validate_credentials(username, password):
    """Return True when *username* exists and *password* matches its record."""
    user = get_user(username)
    if user is None:
        return False
    return user.password == password
def get_user(username):
    """Return the User document for *username*, or None when absent."""
    return User.objects(username=username).first()
# SECURITY NOTE(review): the session-signing key is a hard-coded, trivially
# guessable constant; it should come from configuration.
app.secret_key = 'secret'
if __name__ == '__main__':
    # Log INFO and above to a small rotating file next to the app.
    handler = RotatingFileHandler('todo.log', maxBytes=10000, backupCount=1)
    handler.setLevel(logging.INFO)
    app.logger.addHandler(handler)
    app.run()
|
driquet/dahu | refs/heads/master | dahu/config.py | 1 | # Front end related data
FRONTEND_HOST = 'localhost'
FRONTEND_PORT = 8080
FRONTEND_PREFIX = ''
FRONTEND_THEME = 'default'
# Data related to albums and cache
ALBUMS_PATH = '/Users/driquet/Pictures/Albums/'
CACHE_PATH = '/Users/driquet/Pictures/Albums-cache/'
# Data related to the gallery
GALLERY_TITLE = 'Damien\'s Gallery'
GALLERY_NAME = 'Damien\'s Gallery'
PICTURE_THUMBNAIL_SIZE = 128
PICTURE_SLIDESHOW_SIZE = 1024
ALBUM_THUMBNAIL_SIZE = 256
# Misc
SECRET_KEY = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
|
jhona22baz/blog-flask | refs/heads/master | python2.7/lib/python2.7/site-packages/setuptools/tests/test_sandbox.py | 342 | """develop tests
"""
import sys
import os
import shutil
import unittest
import tempfile
import types
import pkg_resources
import setuptools.sandbox
from setuptools.sandbox import DirectorySandbox, SandboxViolation
def has_win32com():
    """
    Run this to determine if the local machine has win32com, and if it
    does, include additional tests.
    """
    # Fast path: win32com only exists on Windows builds.
    if not sys.platform.startswith('win32'):
        return False
    try:
        # Import purely as an availability probe; the module object itself
        # is not needed (previous code bound it to an unused local).
        __import__('win32com')
    except ImportError:
        return False
    return True
class TestSandbox(unittest.TestCase):
    """Tests for DirectorySandbox's write restrictions."""

    def setUp(self):
        # Each test gets a throwaway directory the sandbox is rooted at.
        self.dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.dir)

    def test_devnull(self):
        # Writing to os.devnull must be allowed inside the sandbox.
        if sys.version < '2.4':
            return
        sandbox = DirectorySandbox(self.dir)
        sandbox.run(self._file_writer(os.devnull))

    def _file_writer(path):
        # Returns a closure that writes a small marker to *path*.
        def do_write():
            f = open(path, 'w')
            f.write('xxx')
            f.close()
        return do_write

    # Pre-decorator staticmethod assignment -- presumably kept for very old
    # Python compatibility; do not convert without checking supported versions.
    _file_writer = staticmethod(_file_writer)

    # test_win32com only exists on machines where win32com is importable.
    if has_win32com():
        def test_win32com(self):
            """
            win32com should not be prevented from caching COM interfaces
            in gen_py.
            """
            import win32com
            gen_py = win32com.__gen_path__
            target = os.path.join(gen_py, 'test_write')
            sandbox = DirectorySandbox(self.dir)
            try:
                try:
                    sandbox.run(self._file_writer(target))
                except SandboxViolation:
                    self.fail("Could not create gen_py file due to SandboxViolation")
            finally:
                # Clean up the probe file even when the sandbox raised.
                if os.path.exists(target): os.remove(target)

    def test_setup_py_with_BOM(self):
        """
        It should be possible to execute a setup.py with a Byte Order Mark
        """
        target = pkg_resources.resource_filename(__name__,
                                                 'script-with-bom.py')
        namespace = types.ModuleType('namespace')
        setuptools.sandbox.execfile(target, vars(namespace))
        assert namespace.result == 'passed'
# Allow running this test module directly with the stdlib runner.
if __name__ == '__main__':
    unittest.main()
|
meiavy/python-weixin | refs/heads/master | lib/python2.7/site-packages/pip/vendor/html5lib/tokenizer.py | 1710 | from __future__ import absolute_import, division, unicode_literals
try:
chr = unichr # flake8: noqa
except NameError:
pass
from collections import deque
from .constants import spaceCharacters
from .constants import entities
from .constants import asciiLetters, asciiUpper2Lower
from .constants import digits, hexDigits, EOF
from .constants import tokenTypes, tagTokenTypes
from .constants import replacementCharacters
from .inputstream import HTMLInputStream
from .trie import Trie
entitiesTrie = Trie(entities)
class HTMLTokenizer(object):
""" This class takes care of tokenizing HTML.
* self.currentToken
Holds the token that is currently being processed.
* self.state
Holds a reference to the method to be invoked... XXX
* self.stream
Points to HTMLInputStream object.
"""
def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
             lowercaseElementName=True, lowercaseAttrName=True, parser=None):
    """Wrap *stream* in an HTMLInputStream and reset tokenizer state."""
    self.stream = HTMLInputStream(stream, encoding, parseMeta, useChardet)
    self.parser = parser

    # Perform case conversions?
    self.lowercaseElementName = lowercaseElementName
    self.lowercaseAttrName = lowercaseAttrName

    # Setup the initial tokenizer state
    self.escapeFlag = False
    self.lastFourChars = []
    # self.state is a bound state method; each call returns False at EOF.
    self.state = self.dataState
    self.escape = False

    # The current token being created
    self.currentToken = None
    super(HTMLTokenizer, self).__init__()
def __iter__(self):
    """ This is where the magic happens.

    We do our usually processing through the states and when we have a token
    to return we yield the token which pauses processing until the next token
    is requested.
    """
    self.tokenQueue = deque([])
    # Start processing. When EOF is reached self.state will return False
    # instead of True and the loop will terminate.
    while self.state():
        # Stream-level errors are surfaced as ParseError tokens first.
        while self.stream.errors:
            yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)}
        while self.tokenQueue:
            yield self.tokenQueue.popleft()
def consumeNumberEntity(self, isHex):
    """This function returns either U+FFFD or the character based on the
    decimal or hexadecimal representation. It also discards ";" if present.

    If not present self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked.
    """
    allowed = digits
    radix = 10
    if isHex:
        allowed = hexDigits
        radix = 16

    charStack = []

    # Consume all the characters that are in range while making sure we
    # don't hit an EOF.
    c = self.stream.char()
    while c in allowed and c is not EOF:
        charStack.append(c)
        c = self.stream.char()

    # Convert the set of characters consumed to an int.
    charAsInt = int("".join(charStack), radix)

    # Certain characters get replaced with others
    if charAsInt in replacementCharacters:
        char = replacementCharacters[charAsInt]
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "illegal-codepoint-for-numeric-entity",
                                "datavars": {"charAsInt": charAsInt}})
    elif ((0xD800 <= charAsInt <= 0xDFFF) or
          (charAsInt > 0x10FFFF)):
        # Surrogates and out-of-range code points map to the replacement char.
        char = "\uFFFD"
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "illegal-codepoint-for-numeric-entity",
                                "datavars": {"charAsInt": charAsInt}})
    else:
        # Should speed up this check somehow (e.g. move the set to a constant)
        if ((0x0001 <= charAsInt <= 0x0008) or
            (0x000E <= charAsInt <= 0x001F) or
            (0x007F <= charAsInt <= 0x009F) or
            (0xFDD0 <= charAsInt <= 0xFDEF) or
            charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE,
                                    0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
                                    0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE,
                                    0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE,
                                    0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE,
                                    0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE,
                                    0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
                                    0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE,
                                    0xFFFFF, 0x10FFFE, 0x10FFFF])):
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data":
                                    "illegal-codepoint-for-numeric-entity",
                                    "datavars": {"charAsInt": charAsInt}})
        try:
            # Try/except needed as UCS-2 Python builds' unichar only works
            # within the BMP.
            char = chr(charAsInt)
        except ValueError:
            # Build a surrogate pair by hand for narrow builds.
            v = charAsInt - 0x10000
            char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF))

    # Discard the ; if present. Otherwise, put it back on the queue and
    # invoke parseError on parser.
    if c != ";":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "numeric-entity-without-semicolon"})
        self.stream.unget(c)

    return char
def consumeEntity(self, allowedChar=None, fromAttribute=False):
    """Consume a character reference after '&'.

    Appends the result to the current attribute value when *fromAttribute*
    is True, otherwise emits it as a character token.
    """
    # Initialise to the default output for when no entity is matched
    output = "&"

    charStack = [self.stream.char()]
    if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&")
            or (allowedChar is not None and allowedChar == charStack[0])):
        # Not actually an entity; put the character back and emit bare '&'.
        self.stream.unget(charStack[0])

    elif charStack[0] == "#":
        # Read the next character to see if it's hex or decimal
        hex = False
        charStack.append(self.stream.char())
        if charStack[-1] in ("x", "X"):
            hex = True
            charStack.append(self.stream.char())

        # charStack[-1] should be the first digit
        if (hex and charStack[-1] in hexDigits) \
                or (not hex and charStack[-1] in digits):
            # At least one digit found, so consume the whole number
            self.stream.unget(charStack[-1])
            output = self.consumeNumberEntity(hex)
        else:
            # No digits found
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "expected-numeric-entity"})
            self.stream.unget(charStack.pop())
            output = "&" + "".join(charStack)

    else:
        # At this point in the process might have named entity. Entities
        # are stored in the global variable "entities".
        #
        # Consume characters and compare to these to a substring of the
        # entity names in the list until the substring no longer matches.
        while (charStack[-1] is not EOF):
            if not entitiesTrie.has_keys_with_prefix("".join(charStack)):
                break
            charStack.append(self.stream.char())

        # At this point we have a string that starts with some characters
        # that may match an entity
        # Try to find the longest entity the string will match to take care
        # of &noti for instance.
        try:
            entityName = entitiesTrie.longest_prefix("".join(charStack[:-1]))
            entityLength = len(entityName)
        except KeyError:
            entityName = None

        if entityName is not None:
            if entityName[-1] != ";":
                self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                        "named-entity-without-semicolon"})
            # In attributes, a semicolonless entity followed by an
            # alphanumeric or '=' is treated as literal text per the spec.
            if (entityName[-1] != ";" and fromAttribute and
                (charStack[entityLength] in asciiLetters or
                 charStack[entityLength] in digits or
                 charStack[entityLength] == "=")):
                self.stream.unget(charStack.pop())
                output = "&" + "".join(charStack)
            else:
                output = entities[entityName]
                self.stream.unget(charStack.pop())
                output += "".join(charStack[entityLength:])
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-named-entity"})
            self.stream.unget(charStack.pop())
            output = "&" + "".join(charStack)

    if fromAttribute:
        self.currentToken["data"][-1][1] += output
    else:
        if output in spaceCharacters:
            tokenType = "SpaceCharacters"
        else:
            tokenType = "Characters"
        self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output})
def processEntityInAttribute(self, allowedChar):
    """This method replaces the need for "entityInAttributeValueState".
    """
    self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
def emitCurrentToken(self):
    """This method is a generic handler for emitting the tags. It also sets
    the state to "data" because that's what's needed after a token has been
    emitted.
    """
    token = self.currentToken
    # Add token to the queue to be yielded
    if (token["type"] in tagTokenTypes):
        if self.lowercaseElementName:
            token["name"] = token["name"].translate(asciiUpper2Lower)
        if token["type"] == tokenTypes["EndTag"]:
            # End tags may carry neither attributes nor a self-closing flag.
            if token["data"]:
                self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                        "data": "attributes-in-end-tag"})
            if token["selfClosing"]:
                self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                        "data": "self-closing-flag-on-end-tag"})
    self.tokenQueue.append(token)
    self.state = self.dataState
# Below are the various tokenizer states worked out.
def dataState(self):
    """Default tokenizer state: plain character data between markup."""
    data = self.stream.char()
    if data == "&":
        self.state = self.entityDataState
    elif data == "<":
        self.state = self.tagOpenState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\u0000"})
    elif data is EOF:
        # Tokenization ends.
        return False
    elif data in spaceCharacters:
        # Directly after emitting a token you switch back to the "data
        # state". At that point spaceCharacters are important so they are
        # emitted separately.
        self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
                                data + self.stream.charsUntil(spaceCharacters, True)})
        # No need to update lastFourChars here, since the first space will
        # have already been appended to lastFourChars and will have broken
        # any <!-- or --> sequences
    else:
        chars = self.stream.charsUntil(("&", "<", "\u0000"))
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                data + chars})
    return True
def entityDataState(self):
    """Consume a character reference found in the data state."""
    self.consumeEntity()
    self.state = self.dataState
    return True
def rcdataState(self):
    """RCDATA state (e.g. <title>/<textarea> content): entities allowed."""
    data = self.stream.char()
    if data == "&":
        self.state = self.characterReferenceInRcdata
    elif data == "<":
        self.state = self.rcdataLessThanSignState
    elif data == EOF:
        # Tokenization ends.
        return False
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\uFFFD"})
    elif data in spaceCharacters:
        # Directly after emitting a token you switch back to the "data
        # state". At that point spaceCharacters are important so they are
        # emitted separately.
        self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
                                data + self.stream.charsUntil(spaceCharacters, True)})
        # No need to update lastFourChars here, since the first space will
        # have already been appended to lastFourChars and will have broken
        # any <!-- or --> sequences
    else:
        chars = self.stream.charsUntil(("&", "<", "\u0000"))
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                data + chars})
    return True
def characterReferenceInRcdata(self):
    """Consume a character reference found inside RCDATA."""
    self.consumeEntity()
    self.state = self.rcdataState
    return True
def rawtextState(self):
    """RAWTEXT state (e.g. <style> content): no entities, only '</'."""
    data = self.stream.char()
    if data == "<":
        self.state = self.rawtextLessThanSignState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\uFFFD"})
    elif data == EOF:
        # Tokenization ends.
        return False
    else:
        chars = self.stream.charsUntil(("<", "\u0000"))
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                data + chars})
    return True
def scriptDataState(self):
    """Script data state (<script> content)."""
    data = self.stream.char()
    if data == "<":
        self.state = self.scriptDataLessThanSignState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\uFFFD"})
    elif data == EOF:
        # Tokenization ends.
        return False
    else:
        chars = self.stream.charsUntil(("<", "\u0000"))
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                data + chars})
    return True
def plaintextState(self):
    """PLAINTEXT state: everything to EOF is character data."""
    data = self.stream.char()
    if data == EOF:
        # Tokenization ends.
        return False
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\uFFFD"})
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                data + self.stream.charsUntil("\u0000")})
    return True
def tagOpenState(self):
    """Just consumed '<': decide between comment/doctype, end tag,
    start tag, or literal text."""
    data = self.stream.char()
    if data == "!":
        self.state = self.markupDeclarationOpenState
    elif data == "/":
        self.state = self.closeTagOpenState
    elif data in asciiLetters:
        self.currentToken = {"type": tokenTypes["StartTag"],
                             "name": data, "data": [],
                             "selfClosing": False,
                             "selfClosingAcknowledged": False}
        self.state = self.tagNameState
    elif data == ">":
        # XXX In theory it could be something besides a tag name. But
        # do we really care?
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-tag-name-but-got-right-bracket"})
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"})
        self.state = self.dataState
    elif data == "?":
        # XXX In theory it could be something besides a tag name. But
        # do we really care?
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-tag-name-but-got-question-mark"})
        self.stream.unget(data)
        self.state = self.bogusCommentState
    else:
        # XXX
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-tag-name"})
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
        self.stream.unget(data)
        self.state = self.dataState
    return True
def closeTagOpenState(self):
    """Just consumed '</': start an end-tag token or recover."""
    data = self.stream.char()
    if data in asciiLetters:
        self.currentToken = {"type": tokenTypes["EndTag"], "name": data,
                             "data": [], "selfClosing": False}
        self.state = self.tagNameState
    elif data == ">":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-closing-tag-but-got-right-bracket"})
        self.state = self.dataState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-closing-tag-but-got-eof"})
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
        self.state = self.dataState
    else:
        # XXX data can be _'_...
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-closing-tag-but-got-char",
                                "datavars": {"data": data}})
        self.stream.unget(data)
        self.state = self.bogusCommentState
    return True
def tagNameState(self):
    """Accumulate the tag name of the current start/end tag token."""
    data = self.stream.char()
    if data in spaceCharacters:
        self.state = self.beforeAttributeNameState
    elif data == ">":
        self.emitCurrentToken()
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-tag-name"})
        self.state = self.dataState
    elif data == "/":
        self.state = self.selfClosingStartTagState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["name"] += "\uFFFD"
    else:
        self.currentToken["name"] += data
        # (Don't use charsUntil here, because tag names are
        # very short and it's faster to not do anything fancy)
    return True
def rcdataLessThanSignState(self):
    """'<' seen in RCDATA: only '</' can start real markup here."""
    data = self.stream.char()
    if data == "/":
        self.temporaryBuffer = ""
        self.state = self.rcdataEndTagOpenState
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
        self.stream.unget(data)
        self.state = self.rcdataState
    return True
def rcdataEndTagOpenState(self):
    """'</' seen in RCDATA: buffer a potential end-tag name."""
    data = self.stream.char()
    if data in asciiLetters:
        self.temporaryBuffer += data
        self.state = self.rcdataEndTagNameState
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
        self.stream.unget(data)
        self.state = self.rcdataState
    return True
def rcdataEndTagNameState(self):
    """Finish a buffered RCDATA end tag if it matches the open element."""
    # "Appropriate" end tag = name matches the current open tag (spec term).
    appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
    data = self.stream.char()
    if data in spaceCharacters and appropriate:
        self.currentToken = {"type": tokenTypes["EndTag"],
                             "name": self.temporaryBuffer,
                             "data": [], "selfClosing": False}
        self.state = self.beforeAttributeNameState
    elif data == "/" and appropriate:
        self.currentToken = {"type": tokenTypes["EndTag"],
                             "name": self.temporaryBuffer,
                             "data": [], "selfClosing": False}
        self.state = self.selfClosingStartTagState
    elif data == ">" and appropriate:
        self.currentToken = {"type": tokenTypes["EndTag"],
                             "name": self.temporaryBuffer,
                             "data": [], "selfClosing": False}
        self.emitCurrentToken()
        self.state = self.dataState
    elif data in asciiLetters:
        self.temporaryBuffer += data
    else:
        # Not a real end tag after all: emit buffered text and re-process.
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "</" + self.temporaryBuffer})
        self.stream.unget(data)
        self.state = self.rcdataState
    return True
def rawtextLessThanSignState(self):
    """'<' seen in RAWTEXT: only '</' can start real markup here."""
    data = self.stream.char()
    if data == "/":
        self.temporaryBuffer = ""
        self.state = self.rawtextEndTagOpenState
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
        self.stream.unget(data)
        self.state = self.rawtextState
    return True
def rawtextEndTagOpenState(self):
    """'</' seen in RAWTEXT: buffer a potential end-tag name."""
    data = self.stream.char()
    if data in asciiLetters:
        self.temporaryBuffer += data
        self.state = self.rawtextEndTagNameState
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
        self.stream.unget(data)
        self.state = self.rawtextState
    return True
def rawtextEndTagNameState(self):
    """Finish a buffered RAWTEXT end tag if it matches the open element."""
    appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
    data = self.stream.char()
    if data in spaceCharacters and appropriate:
        self.currentToken = {"type": tokenTypes["EndTag"],
                             "name": self.temporaryBuffer,
                             "data": [], "selfClosing": False}
        self.state = self.beforeAttributeNameState
    elif data == "/" and appropriate:
        self.currentToken = {"type": tokenTypes["EndTag"],
                             "name": self.temporaryBuffer,
                             "data": [], "selfClosing": False}
        self.state = self.selfClosingStartTagState
    elif data == ">" and appropriate:
        self.currentToken = {"type": tokenTypes["EndTag"],
                             "name": self.temporaryBuffer,
                             "data": [], "selfClosing": False}
        self.emitCurrentToken()
        self.state = self.dataState
    elif data in asciiLetters:
        self.temporaryBuffer += data
    else:
        # Not a real end tag after all: emit buffered text and re-process.
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "</" + self.temporaryBuffer})
        self.stream.unget(data)
        self.state = self.rawtextState
    return True
def scriptDataLessThanSignState(self):
    """'<' seen in script data: '</' starts an end tag, '<!' may start
    escaped script data."""
    data = self.stream.char()
    if data == "/":
        self.temporaryBuffer = ""
        self.state = self.scriptDataEndTagOpenState
    elif data == "!":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"})
        self.state = self.scriptDataEscapeStartState
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
        self.stream.unget(data)
        self.state = self.scriptDataState
    return True
def scriptDataEndTagOpenState(self):
    """'</' seen in script data: buffer a potential end-tag name."""
    data = self.stream.char()
    if data in asciiLetters:
        self.temporaryBuffer += data
        self.state = self.scriptDataEndTagNameState
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
        self.stream.unget(data)
        self.state = self.scriptDataState
    return True
def scriptDataEndTagNameState(self):
    """Finish a buffered script-data end tag if it matches the open element."""
    appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
    data = self.stream.char()
    if data in spaceCharacters and appropriate:
        self.currentToken = {"type": tokenTypes["EndTag"],
                             "name": self.temporaryBuffer,
                             "data": [], "selfClosing": False}
        self.state = self.beforeAttributeNameState
    elif data == "/" and appropriate:
        self.currentToken = {"type": tokenTypes["EndTag"],
                             "name": self.temporaryBuffer,
                             "data": [], "selfClosing": False}
        self.state = self.selfClosingStartTagState
    elif data == ">" and appropriate:
        self.currentToken = {"type": tokenTypes["EndTag"],
                             "name": self.temporaryBuffer,
                             "data": [], "selfClosing": False}
        self.emitCurrentToken()
        self.state = self.dataState
    elif data in asciiLetters:
        self.temporaryBuffer += data
    else:
        # Not a real end tag after all: emit buffered text and re-process.
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "</" + self.temporaryBuffer})
        self.stream.unget(data)
        self.state = self.scriptDataState
    return True
def scriptDataEscapeStartState(self):
    """After '<!' in script data: a '-' moves toward the escaped state."""
    data = self.stream.char()
    if data == "-":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
        self.state = self.scriptDataEscapeStartDashState
    else:
        self.stream.unget(data)
        self.state = self.scriptDataState
    return True
def scriptDataEscapeStartDashState(self):
    """After '<!-' in script data: a second '-' enters escaped dash-dash."""
    data = self.stream.char()
    if data == "-":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
        self.state = self.scriptDataEscapedDashDashState
    else:
        self.stream.unget(data)
        self.state = self.scriptDataState
    return True
def scriptDataEscapedState(self):
    """Script data inside an HTML-comment-like escape ('<!-- ... -->')."""
    data = self.stream.char()
    if data == "-":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
        self.state = self.scriptDataEscapedDashState
    elif data == "<":
        self.state = self.scriptDataEscapedLessThanSignState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\uFFFD"})
    elif data == EOF:
        self.state = self.dataState
    else:
        chars = self.stream.charsUntil(("<", "-", "\u0000"))
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                data + chars})
    return True
def scriptDataEscapedDashState(self):
    """One '-' seen in escaped script data."""
    data = self.stream.char()
    if data == "-":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
        self.state = self.scriptDataEscapedDashDashState
    elif data == "<":
        self.state = self.scriptDataEscapedLessThanSignState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\uFFFD"})
        self.state = self.scriptDataEscapedState
    elif data == EOF:
        self.state = self.dataState
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        self.state = self.scriptDataEscapedState
    return True
def scriptDataEscapedDashDashState(self):
    """'--' seen in escaped script data; '>' closes the escape."""
    data = self.stream.char()
    if data == "-":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
    elif data == "<":
        self.state = self.scriptDataEscapedLessThanSignState
    elif data == ">":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
        self.state = self.scriptDataState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\uFFFD"})
        self.state = self.scriptDataEscapedState
    elif data == EOF:
        self.state = self.dataState
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        self.state = self.scriptDataEscapedState
    return True
def scriptDataEscapedLessThanSignState(self):
    """'<' seen in escaped script data: end tag or double-escape start."""
    data = self.stream.char()
    if data == "/":
        self.temporaryBuffer = ""
        self.state = self.scriptDataEscapedEndTagOpenState
    elif data in asciiLetters:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data})
        self.temporaryBuffer = data
        self.state = self.scriptDataDoubleEscapeStartState
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
        self.stream.unget(data)
        self.state = self.scriptDataEscapedState
    return True
def scriptDataEscapedEndTagOpenState(self):
    """'</' seen in escaped script data: buffer a potential end-tag name."""
    data = self.stream.char()
    if data in asciiLetters:
        self.temporaryBuffer = data
        self.state = self.scriptDataEscapedEndTagNameState
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
        self.stream.unget(data)
        self.state = self.scriptDataEscapedState
    return True
def scriptDataEscapedEndTagNameState(self):
    """Finish a buffered escaped-script end tag if it matches the open
    element."""
    appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
    data = self.stream.char()
    if data in spaceCharacters and appropriate:
        self.currentToken = {"type": tokenTypes["EndTag"],
                             "name": self.temporaryBuffer,
                             "data": [], "selfClosing": False}
        self.state = self.beforeAttributeNameState
    elif data == "/" and appropriate:
        self.currentToken = {"type": tokenTypes["EndTag"],
                             "name": self.temporaryBuffer,
                             "data": [], "selfClosing": False}
        self.state = self.selfClosingStartTagState
    elif data == ">" and appropriate:
        self.currentToken = {"type": tokenTypes["EndTag"],
                             "name": self.temporaryBuffer,
                             "data": [], "selfClosing": False}
        self.emitCurrentToken()
        self.state = self.dataState
    elif data in asciiLetters:
        self.temporaryBuffer += data
    else:
        # Not a real end tag after all: emit buffered text and re-process.
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "</" + self.temporaryBuffer})
        self.stream.unget(data)
        self.state = self.scriptDataEscapedState
    return True
def scriptDataDoubleEscapeStartState(self):
    """Decide whether '<script' inside escaped data starts double escaping."""
    data = self.stream.char()
    if data in (spaceCharacters | frozenset(("/", ">"))):
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        # Buffer must spell "script" to enter the double-escaped state.
        if self.temporaryBuffer.lower() == "script":
            self.state = self.scriptDataDoubleEscapedState
        else:
            self.state = self.scriptDataEscapedState
    elif data in asciiLetters:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        self.temporaryBuffer += data
    else:
        self.stream.unget(data)
        self.state = self.scriptDataEscapedState
    return True
def scriptDataDoubleEscapedState(self):
    """Script data inside a nested ('double') escape."""
    data = self.stream.char()
    if data == "-":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
        self.state = self.scriptDataDoubleEscapedDashState
    elif data == "<":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
        self.state = self.scriptDataDoubleEscapedLessThanSignState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\uFFFD"})
    elif data == EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-script-in-script"})
        self.state = self.dataState
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
    return True
def scriptDataDoubleEscapedDashState(self):
    """One '-' seen in double-escaped script data."""
    data = self.stream.char()
    if data == "-":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
        self.state = self.scriptDataDoubleEscapedDashDashState
    elif data == "<":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
        self.state = self.scriptDataDoubleEscapedLessThanSignState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\uFFFD"})
        self.state = self.scriptDataDoubleEscapedState
    elif data == EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-script-in-script"})
        self.state = self.dataState
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        self.state = self.scriptDataDoubleEscapedState
    return True
def scriptDataDoubleEscapedDashDashState(self):
    """'--' seen in double-escaped script data; '>' exits to script data."""
    data = self.stream.char()
    if data == "-":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
    elif data == "<":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
        self.state = self.scriptDataDoubleEscapedLessThanSignState
    elif data == ">":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
        self.state = self.scriptDataState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\uFFFD"})
        self.state = self.scriptDataDoubleEscapedState
    elif data == EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-script-in-script"})
        self.state = self.dataState
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        self.state = self.scriptDataDoubleEscapedState
    return True
def scriptDataDoubleEscapedLessThanSignState(self):
    """'<' seen in double-escaped script data: '/' may end the double
    escape."""
    data = self.stream.char()
    if data == "/":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"})
        self.temporaryBuffer = ""
        self.state = self.scriptDataDoubleEscapeEndState
    else:
        self.stream.unget(data)
        self.state = self.scriptDataDoubleEscapedState
    return True
def scriptDataDoubleEscapeEndState(self):
    """Decide whether '</script' ends the double escape."""
    data = self.stream.char()
    if data in (spaceCharacters | frozenset(("/", ">"))):
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        # Buffer must spell "script" to drop back to single-escaped state.
        if self.temporaryBuffer.lower() == "script":
            self.state = self.scriptDataEscapedState
        else:
            self.state = self.scriptDataDoubleEscapedState
    elif data in asciiLetters:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        self.temporaryBuffer += data
    else:
        self.stream.unget(data)
        self.state = self.scriptDataDoubleEscapedState
    return True
def beforeAttributeNameState(self):
    """Between attributes in a tag: start a new [name, value] pair."""
    data = self.stream.char()
    if data in spaceCharacters:
        self.stream.charsUntil(spaceCharacters, True)
    elif data in asciiLetters:
        # Attributes are stored as [name, value] lists; converted to a
        # dict when the token is emitted.
        self.currentToken["data"].append([data, ""])
        self.state = self.attributeNameState
    elif data == ">":
        self.emitCurrentToken()
    elif data == "/":
        self.state = self.selfClosingStartTagState
    elif data in ("'", '"', "=", "<"):
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "invalid-character-in-attribute-name"})
        self.currentToken["data"].append([data, ""])
        self.state = self.attributeNameState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"].append(["\uFFFD", ""])
        self.state = self.attributeNameState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-attribute-name-but-got-eof"})
        self.state = self.dataState
    else:
        self.currentToken["data"].append([data, ""])
        self.state = self.attributeNameState
    return True
def attributeNameState(self):
    """Accumulate the current attribute's name; on leaving, lowercase it
    and check for duplicates."""
    data = self.stream.char()
    leavingThisState = True
    emitToken = False
    if data == "=":
        self.state = self.beforeAttributeValueState
    elif data in asciiLetters:
        self.currentToken["data"][-1][0] += data +\
            self.stream.charsUntil(asciiLetters, True)
        leavingThisState = False
    elif data == ">":
        # XXX If we emit here the attributes are converted to a dict
        # without being checked and when the code below runs we error
        # because data is a dict not a list
        emitToken = True
    elif data in spaceCharacters:
        self.state = self.afterAttributeNameState
    elif data == "/":
        self.state = self.selfClosingStartTagState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"][-1][0] += "\uFFFD"
        leavingThisState = False
    elif data in ("'", '"', "<"):
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data":
                                "invalid-character-in-attribute-name"})
        self.currentToken["data"][-1][0] += data
        leavingThisState = False
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "eof-in-attribute-name"})
        self.state = self.dataState
    else:
        self.currentToken["data"][-1][0] += data
        leavingThisState = False

    if leavingThisState:
        # Attributes are not dropped at this stage. That happens when the
        # start tag token is emitted so values can still be safely appended
        # to attributes, but we do want to report the parse error in time.
        if self.lowercaseAttrName:
            self.currentToken["data"][-1][0] = (
                self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
        for name, value in self.currentToken["data"][:-1]:
            if self.currentToken["data"][-1][0] == name:
                self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                        "duplicate-attribute"})
                break
        # XXX Fix for above XXX
        if emitToken:
            self.emitCurrentToken()
    return True
    def afterAttributeNameState(self):
        """Handle characters following a completed attribute name (before any
        "=", next attribute, or tag close)."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.stream.charsUntil(spaceCharacters, True)
        elif data == "=":
            self.state = self.beforeAttributeValueState
        elif data == ">":
            self.emitCurrentToken()
        elif data in asciiLetters:
            # Start of a new (value-less so far) attribute.
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        elif data == "/":
            self.state = self.selfClosingStartTagState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"].append(["\uFFFD", ""])
            self.state = self.attributeNameState
        elif data in ("'", '"', "<"):
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "invalid-character-after-attribute-name"})
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-end-of-tag-but-got-eof"})
            self.state = self.dataState
        else:
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        return True
    def beforeAttributeValueState(self):
        """Determine how the attribute value is delimited (double quote,
        single quote, or unquoted) and switch to the matching state."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.stream.charsUntil(spaceCharacters, True)
        elif data == "\"":
            self.state = self.attributeValueDoubleQuotedState
        elif data == "&":
            # "&" starts an unquoted value; reprocess it there so the
            # entity machinery runs in that state.
            self.state = self.attributeValueUnQuotedState
            self.stream.unget(data)
        elif data == "'":
            self.state = self.attributeValueSingleQuotedState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-attribute-value-but-got-right-bracket"})
            self.emitCurrentToken()
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"][-1][1] += "\uFFFD"
            self.state = self.attributeValueUnQuotedState
        elif data in ("=", "<", "`"):
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "equals-in-unquoted-attribute-value"})
            self.currentToken["data"][-1][1] += data
            self.state = self.attributeValueUnQuotedState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-attribute-value-but-got-eof"})
            self.state = self.dataState
        else:
            self.currentToken["data"][-1][1] += data
            self.state = self.attributeValueUnQuotedState
        return True
    def attributeValueDoubleQuotedState(self):
        """Accumulate characters of a double-quoted attribute value."""
        data = self.stream.char()
        if data == "\"":
            self.state = self.afterAttributeValueState
        elif data == "&":
            self.processEntityInAttribute('"')
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"][-1][1] += "\uFFFD"
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-attribute-value-double-quote"})
            self.state = self.dataState
        else:
            # Fast path: consume a run of ordinary characters at once.
            self.currentToken["data"][-1][1] += data +\
                self.stream.charsUntil(("\"", "&", "\u0000"))
        return True
def attributeValueSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterAttributeValueState
elif data == "&":
self.processEntityInAttribute("'")
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-single-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("'", "&", "\u0000"))
return True
    def attributeValueUnQuotedState(self):
        """Accumulate characters of an unquoted attribute value."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeAttributeNameState
        elif data == "&":
            self.processEntityInAttribute(">")
        elif data == ">":
            self.emitCurrentToken()
        elif data in ('"', "'", "=", "<", "`"):
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-character-in-unquoted-attribute-value"})
            self.currentToken["data"][-1][1] += data
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"][-1][1] += "\uFFFD"
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-attribute-value-no-quotes"})
            self.state = self.dataState
        else:
            # Fast path: any delimiter or special character stops the run.
            self.currentToken["data"][-1][1] += data + self.stream.charsUntil(
                frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters)
        return True
    def afterAttributeValueState(self):
        """Handle the character following a quoted attribute value."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeAttributeNameState
        elif data == ">":
            self.emitCurrentToken()
        elif data == "/":
            self.state = self.selfClosingStartTagState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-EOF-after-attribute-value"})
            self.stream.unget(data)
            self.state = self.dataState
        else:
            # Missing whitespace between attributes: reprocess this
            # character as the start of the next attribute.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-character-after-attribute-value"})
            self.stream.unget(data)
            self.state = self.beforeAttributeNameState
        return True
def selfClosingStartTagState(self):
data = self.stream.char()
if data == ">":
self.currentToken["selfClosing"] = True
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"unexpected-EOF-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def bogusCommentState(self):
# Make a new comment token and give it as value all the characters
# until the first > or EOF (charsUntil checks for EOF automatically)
# and emit it.
data = self.stream.charsUntil(">")
data = data.replace("\u0000", "\uFFFD")
self.tokenQueue.append(
{"type": tokenTypes["Comment"], "data": data})
# Eat the character directly after the bogus comment which is either a
# ">" or an EOF.
self.stream.char()
self.state = self.dataState
return True
    def markupDeclarationOpenState(self):
        """Dispatch "<!" sequences: "--" starts a comment, "DOCTYPE" a
        doctype, and "[CDATA[" (in foreign content only) a CDATA section.

        Characters are read speculatively onto ``charStack``; if nothing
        matches, they are all ungot and the whole thing becomes a bogus
        comment.
        """
        charStack = [self.stream.char()]
        if charStack[-1] == "-":
            charStack.append(self.stream.char())
            if charStack[-1] == "-":
                self.currentToken = {"type": tokenTypes["Comment"], "data": ""}
                self.state = self.commentStartState
                return True
        elif charStack[-1] in ('d', 'D'):
            # Case-insensitive match of the remaining "OCTYPE" letters.
            matched = True
            for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'),
                             ('y', 'Y'), ('p', 'P'), ('e', 'E')):
                charStack.append(self.stream.char())
                if charStack[-1] not in expected:
                    matched = False
                    break
            if matched:
                self.currentToken = {"type": tokenTypes["Doctype"],
                                     "name": "",
                                     "publicId": None, "systemId": None,
                                     "correct": True}
                self.state = self.doctypeState
                return True
        elif (charStack[-1] == "[" and
              self.parser is not None and
              self.parser.tree.openElements and
              self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace):
            # CDATA sections are only valid in foreign (SVG/MathML) content.
            matched = True
            for expected in ["C", "D", "A", "T", "A", "["]:
                charStack.append(self.stream.char())
                if charStack[-1] != expected:
                    matched = False
                    break
            if matched:
                self.state = self.cdataSectionState
                return True

        # No match: push everything back and treat it as a bogus comment.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-dashes-or-doctype"})

        while charStack:
            self.stream.unget(charStack.pop())
        self.state = self.bogusCommentState
        return True
    def commentStartState(self):
        """Handle the first character after the "<!--" that opened a comment."""
        data = self.stream.char()
        if data == "-":
            self.state = self.commentStartDashState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "\uFFFD"
        elif data == ">":
            # "<!-->" : an (invalid) empty comment; emit it anyway.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "incorrect-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["data"] += data
            self.state = self.commentState
        return True
    def commentStartDashState(self):
        """Handle the character after a single "-" at the start of a comment."""
        data = self.stream.char()
        if data == "-":
            self.state = self.commentEndState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            # The pending "-" was real comment data after all.
            self.currentToken["data"] += "-\uFFFD"
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "incorrect-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["data"] += "-" + data
            self.state = self.commentState
        return True
    def commentState(self):
        """Accumulate ordinary comment characters until a "-" or EOF."""
        data = self.stream.char()
        if data == "-":
            self.state = self.commentEndDashState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "\uFFFD"
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            # Fast path: consume a run of characters up to the next special.
            self.currentToken["data"] += data + \
                self.stream.charsUntil(("-", "\u0000"))
        return True
    def commentEndDashState(self):
        """Handle the character after one "-" seen inside a comment body."""
        data = self.stream.char()
        if data == "-":
            self.state = self.commentEndState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            # The pending "-" turns out to be data, not part of "-->".
            self.currentToken["data"] += "-\uFFFD"
            self.state = self.commentState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment-end-dash"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["data"] += "-" + data
            self.state = self.commentState
        return True
    def commentEndState(self):
        """Handle the character after "--" inside a comment (possible "-->")."""
        data = self.stream.char()
        if data == ">":
            # "-->": the comment is complete.
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            # The pending "--" becomes comment data.
            self.currentToken["data"] += "--\uFFFD"
            self.state = self.commentState
        elif data == "!":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-bang-after-double-dash-in-comment"})
            self.state = self.commentEndBangState
        elif data == "-":
            # Extra dash: stay in this state, one "-" joins the data.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-dash-after-double-dash-in-comment"})
            self.currentToken["data"] += data
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment-double-dash"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            # XXX
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-comment"})
            self.currentToken["data"] += "--" + data
            self.state = self.commentState
        return True
    def commentEndBangState(self):
        """Handle the character after "--!" inside a comment (possible "--!>")."""
        data = self.stream.char()
        if data == ">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == "-":
            # "--!-": the "--!" becomes data, the new "-" may start "-->".
            self.currentToken["data"] += "--!"
            self.state = self.commentEndDashState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "--!\uFFFD"
            self.state = self.commentState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment-end-bang-state"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["data"] += "--!" + data
            self.state = self.commentState
        return True
def doctypeState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"need-space-after-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeNameState
return True
    def beforeDoctypeNameState(self):
        """Skip whitespace before the doctype name; the first real character
        starts the name."""
        data = self.stream.char()
        if data in spaceCharacters:
            pass
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-doctype-name-but-got-right-bracket"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["name"] = "\uFFFD"
            self.state = self.doctypeNameState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-doctype-name-but-got-eof"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["name"] = data
            self.state = self.doctypeNameState
        return True
    def doctypeNameState(self):
        """Accumulate the doctype name; it is lowercased when complete."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
            self.state = self.afterDoctypeNameState
        elif data == ">":
            self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["name"] += "\uFFFD"
            self.state = self.doctypeNameState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype-name"})
            self.currentToken["correct"] = False
            self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["name"] += data
        return True
    def afterDoctypeNameState(self):
        """After the doctype name, look (case-insensitively) for the PUBLIC
        or SYSTEM keywords; anything else makes the doctype bogus."""
        data = self.stream.char()
        if data in spaceCharacters:
            pass
        elif data == ">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.currentToken["correct"] = False
            self.stream.unget(data)
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            if data in ("p", "P"):
                # Try to match the rest of "PUBLIC" case-insensitively.
                matched = True
                for expected in (("u", "U"), ("b", "B"), ("l", "L"),
                                 ("i", "I"), ("c", "C")):
                    data = self.stream.char()
                    if data not in expected:
                        matched = False
                        break
                if matched:
                    self.state = self.afterDoctypePublicKeywordState
                    return True
            elif data in ("s", "S"):
                # Try to match the rest of "SYSTEM" case-insensitively.
                matched = True
                for expected in (("y", "Y"), ("s", "S"), ("t", "T"),
                                 ("e", "E"), ("m", "M")):
                    data = self.stream.char()
                    if data not in expected:
                        matched = False
                        break
                if matched:
                    self.state = self.afterDoctypeSystemKeywordState
                    return True

            # All the characters read before the current 'data' will be
            # [a-zA-Z], so they're garbage in the bogus doctype and can be
            # discarded; only the latest character might be '>' or EOF
            # and needs to be ungetted
            self.stream.unget(data)
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-space-or-right-bracket-in-doctype", "datavars":
                                    {"data": data}})
            self.currentToken["correct"] = False
            self.state = self.bogusDoctypeState

        return True
    def afterDoctypePublicKeywordState(self):
        """Handle the character right after the PUBLIC keyword."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeDoctypePublicIdentifierState
        elif data in ("'", '"'):
            # Quote with no separating space: report, then reprocess it.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.stream.unget(data)
            self.state = self.beforeDoctypePublicIdentifierState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.stream.unget(data)
            self.state = self.beforeDoctypePublicIdentifierState
        return True
    def beforeDoctypePublicIdentifierState(self):
        """Skip whitespace, then expect a quoted public identifier."""
        data = self.stream.char()
        if data in spaceCharacters:
            pass
        elif data == "\"":
            self.currentToken["publicId"] = ""
            self.state = self.doctypePublicIdentifierDoubleQuotedState
        elif data == "'":
            self.currentToken["publicId"] = ""
            self.state = self.doctypePublicIdentifierSingleQuotedState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-end-of-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.currentToken["correct"] = False
            self.state = self.bogusDoctypeState
        return True
    def doctypePublicIdentifierDoubleQuotedState(self):
        """Accumulate a double-quoted DOCTYPE public identifier."""
        data = self.stream.char()
        if data == "\"":
            self.state = self.afterDoctypePublicIdentifierState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["publicId"] += "\uFFFD"
        elif data == ">":
            # Premature end of the doctype: emit it flagged as incorrect.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-end-of-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["publicId"] += data
        return True
def doctypePublicIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
    def afterDoctypePublicIdentifierState(self):
        """Handle characters after a completed public identifier (a system
        identifier may follow)."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.betweenDoctypePublicAndSystemIdentifiersState
        elif data == ">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == '"':
            # System identifier without separating space: error but accept.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.currentToken["systemId"] = ""
            self.state = self.doctypeSystemIdentifierDoubleQuotedState
        elif data == "'":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.currentToken["systemId"] = ""
            self.state = self.doctypeSystemIdentifierSingleQuotedState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.currentToken["correct"] = False
            self.state = self.bogusDoctypeState
        return True
def betweenDoctypePublicAndSystemIdentifiersState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
    def afterDoctypeSystemKeywordState(self):
        """Handle the character right after the SYSTEM keyword."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeDoctypeSystemIdentifierState
        elif data in ("'", '"'):
            # Quote with no separating space: report, then reprocess it.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.stream.unget(data)
            self.state = self.beforeDoctypeSystemIdentifierState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.stream.unget(data)
            self.state = self.beforeDoctypeSystemIdentifierState
        return True
    def beforeDoctypeSystemIdentifierState(self):
        """Skip whitespace, then expect a quoted system identifier."""
        data = self.stream.char()
        if data in spaceCharacters:
            pass
        elif data == "\"":
            self.currentToken["systemId"] = ""
            self.state = self.doctypeSystemIdentifierDoubleQuotedState
        elif data == "'":
            self.currentToken["systemId"] = ""
            self.state = self.doctypeSystemIdentifierSingleQuotedState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.currentToken["correct"] = False
            self.state = self.bogusDoctypeState
        return True
    def doctypeSystemIdentifierDoubleQuotedState(self):
        """Accumulate a double-quoted DOCTYPE system identifier."""
        data = self.stream.char()
        if data == "\"":
            self.state = self.afterDoctypeSystemIdentifierState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["systemId"] += "\uFFFD"
        elif data == ">":
            # Premature end of the doctype: emit it flagged as incorrect.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-end-of-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["systemId"] += data
        return True
def doctypeSystemIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def afterDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.state = self.bogusDoctypeState
return True
def bogusDoctypeState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
# XXX EMIT
self.stream.unget(data)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
pass
return True
def cdataSectionState(self):
data = []
while True:
data.append(self.stream.charsUntil("]"))
data.append(self.stream.charsUntil(">"))
char = self.stream.char()
if char == EOF:
break
else:
assert char == ">"
if data[-1][-2:] == "]]":
data[-1] = data[-1][:-2]
break
else:
data.append(char)
data = "".join(data)
# Deal with null here rather than in the parser
nullCount = data.count("\u0000")
if nullCount > 0:
for i in range(nullCount):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
data = data.replace("\u0000", "\uFFFD")
if data:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": data})
self.state = self.dataState
return True
|
obi-two/Rebelion | refs/heads/master | data/scripts/templates/object/tangible/ship/components/chassis/shared_tieinterceptor_chassis_token.py | 2 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Build the Tangible template for the TIE Interceptor chassis token.

	Called by the engine with the active kernel; returns the configured
	template object.
	"""
	chassis_token = Tangible()
	chassis_token.template = "object/tangible/ship/components/chassis/shared_tieinterceptor_chassis_token.iff"
	chassis_token.attribute_template_id = 8
	chassis_token.stfName("space_crafting_n","tieinterceptor_chassis_token")

	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####

	return chassis_token
danielmohr/androguard | refs/heads/master | androarsc.py | 38 | #!/usr/bin/env python
# This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
import sys
from optparse import OptionParser
from xml.dom import minidom
import codecs
from androguard.core import androconf
from androguard.core.bytecodes import apk
# optparse option descriptors: each dict's 'name' entry holds the flag
# strings; everything else is passed through to parser.add_option() as
# keyword arguments in the __main__ block below.
option_0 = { 'name' : ('-i', '--input'), 'help' : 'filename input (APK or android resources(arsc))', 'nargs' : 1 }
option_1 = { 'name' : ('-p', '--package'), 'help' : 'select the package (optional)', 'nargs' : 1 }
option_2 = { 'name' : ('-l', '--locale'), 'help' : 'select the locale (optional)', 'nargs' : 1 }
option_3 = { 'name' : ('-t', '--type'), 'help' : 'select the type (string, interger, public, ...)', 'nargs' : 1 }
option_4 = { 'name' : ('-o', '--output'), 'help' : 'filename output', 'nargs' : 1 }
option_5 = { 'name' : ('-v', '--version'), 'help' : 'version of the API', 'action' : 'count' }

options = [option_0, option_1, option_2, option_3, option_4, option_5]
def main(options, arguments):
if options.input != None:
buff = ""
arscobj = None
ret_type = androconf.is_android(options.input)
if ret_type == "APK":
a = apk.APK(options.input)
arscobj = a.get_android_resources()
elif ret_type == "ARSC":
arscobj = apk.ARSCParser(open(options.input, "rb").read())
else:
print "Unknown file type"
return
if not options.package and not options.type and not options.locale:
buff = ""
for package in arscobj.get_packages_names():
buff += package + "\n"
for locale in arscobj.get_locales(package):
buff += "\t" + repr(locale) + "\n"
for ttype in arscobj.get_types(package, locale):
buff += "\t\t" + ttype + "\n"
else:
package = options.package or arscobj.get_packages_names()[0]
ttype = options.type or "public"
locale = options.locale or '\x00\x00'
buff = minidom.parseString(getattr(arscobj, "get_" + ttype + "_resources")(package, locale)).toprettyxml()
if options.output != None:
fd = codecs.open(options.output, "w", "utf-8")
fd.write(buff)
fd.close()
else:
print buff
elif options.version != None:
print "Androarsc version %s" % androconf.ANDROGUARD_VERSION
if __name__ == "__main__":
parser = OptionParser()
for option in options:
param = option['name']
del option['name']
parser.add_option(*param, **option)
options, arguments = parser.parse_args()
sys.argv[:] = arguments
main(options, arguments)
|
NullScope/BorderlessStone | refs/heads/master | tests/import/relimp/relimp/relimp3.py | 7 | #-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
name = 'relimp.relimp.relimp3'
|
res2k/0install | refs/heads/master | zeroinstall/injector/qdom.py | 3 | """A quick DOM implementation.
Python's xml.dom is very slow. The xml.sax module is also slow (as it imports urllib2).
This is our light-weight version.
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from xml.parsers import expat
import zeroinstall
from zeroinstall.injector import versions
_parsed_version = versions.parse_version(zeroinstall.version)
class Element(object):
    """An XML element.
    @ivar uri: the element's namespace
    @type uri: str
    @ivar name: the element's localName
    @type name: str
    @ivar attrs: the element's attributes (key is in the form [namespace " "] localName)
    @type attrs: {str: str}
    @ivar childNodes: children
    @type childNodes: [L{Element}]
    @ivar content: the text content
    @type content: str"""
    __slots__ = ['uri', 'name', 'attrs', 'childNodes', 'content']

    def __init__(self, uri, name, attrs):
        """@type uri: str
        @type name: str
        @type attrs: {str: str}"""
        self.uri = uri
        self.name = name
        # Keep a private copy so later mutation by the caller has no effect.
        self.attrs = dict(attrs)
        self.content = None
        self.childNodes = []

    def __str__(self):
        """@rtype: str"""
        rendered = [key + '=' + self.attrs[key] for key in self.attrs]
        opening = '<{%s}%s %s' % (self.uri, self.name, ' '.join(rendered))
        if self.childNodes:
            inner = '\n'.join(map(str, self.childNodes))
            return opening + '>' + inner + ('</%s>' % (self.name))
        if self.content:
            return opening + '>' + self.content + ('</%s>' % (self.name))
        return opening + '/>'

    def getAttribute(self, name):
        """Return the value of attribute *name*, or None if it is absent.
        @type name: str
        @rtype: str"""
        return self.attrs.get(name, None)

    def toDOM(self, doc, prefixes):
        """Create a DOM Element mirroring this qdom.Element.
        @param doc: document to use to create the element
        @type prefixes: L{Prefixes}
        @return: the new element"""
        dom_elem = prefixes.createElementNS(doc, self.uri, self.name)
        for fullname, value in self.attrs.items():
            # A namespaced attribute key looks like "namespace localName".
            if ' ' in fullname:
                ns, local = fullname.split(' ', 1)
            else:
                ns, local = None, fullname
            prefixes.setAttributeNS(dom_elem, ns, local, value)
        for child in self.childNodes:
            dom_elem.appendChild(child.toDOM(doc, prefixes))
        if self.content:
            dom_elem.appendChild(doc.createTextNode(self.content))
        return dom_elem
class QSAXhandler(object):
    """SAXHandler that builds a tree of L{Element}s"""
    def __init__(self, filter_for_version = False):
        """@param filter_for_version: skip elements if their if-0install-version attribute doesn't match L{zeroinstall.version} (since 1.13).
        @type filter_for_version: bool"""
        self.stack = []
        if filter_for_version:
            # Evaluate each element's version-range expression against the
            # running 0install version.
            self.filter_range = lambda expr: versions.parse_version_expression(expr)(_parsed_version)
        else:
            # No filtering requested: every element is accepted.
            self.filter_range = lambda expr: True

    def startElementNS(self, fullname, attrs):
        """@type fullname: str
        @type attrs: {str: str}"""
        # expat joins namespace and localName with a space (see parse()).
        parts = fullname.split(' ', 1)
        if len(parts) == 2:
            uri, local = parts
        else:
            uri, local = None, fullname
        self.stack.append(Element(uri, local, attrs))
        self.contents = ''

    def characters(self, data):
        """@type data: str"""
        self.contents += data

    def endElementNS(self, name):
        """@type name: str"""
        finished = self.stack.pop()
        finished.content = self.contents.strip()
        self.contents = ''
        if not self.stack:
            # This was the root: the document is complete.
            self.doc = finished
            return
        target_versions = finished.attrs.get('if-0install-version')
        if target_versions and not self.filter_range(target_versions):
            return  # Filtered out: drop the element entirely.
        self.stack[-1].childNodes.append(finished)
def parse(source, filter_for_version = False):
    """Parse an XML stream into a tree of L{Element}s.
    @param source: data to parse
    @type source: file
    @param filter_for_version: skip elements if their if-0install-version attribute doesn't match L{zeroinstall.version} (since 1.13).
    @type filter_for_version: bool
    @return: the root
    @rtype: L{Element}"""
    sax_handler = QSAXhandler(filter_for_version)
    expat_parser = expat.ParserCreate(namespace_separator = ' ')

    # Route expat callbacks to our tree-building handler.
    expat_parser.StartElementHandler = sax_handler.startElementNS
    expat_parser.EndElementHandler = sax_handler.endElementNS
    expat_parser.CharacterDataHandler = sax_handler.characters

    expat_parser.ParseFile(source)
    return sax_handler.doc
class Prefixes(object):
    """Keep track of namespace prefixes. Used when serialising a document.
    @since: 0.54
    """

    def __init__(self, default_ns):
        """@param default_ns: namespace that needs no prefix
        @type default_ns: str"""
        self.prefixes = {}
        self.default_ns = default_ns

    def get(self, ns):
        """Return the prefix for ``ns``, allocating a fresh one (ns0,
        ns1, ...) on first use.
        @type ns: str
        @rtype: str"""
        if ns not in self.prefixes:
            self.prefixes[ns] = 'ns%d' % len(self.prefixes)
        return self.prefixes[ns]

    def setAttributeNS(self, elem, uri, localName, value):
        """Set an attribute, qualifying its name when it is namespaced.
        @type uri: str
        @type localName: str
        @type value: str"""
        if uri is None:
            elem.setAttributeNS(None, localName, value)
        else:
            elem.setAttributeNS(uri, '%s:%s' % (self.get(uri), localName), value)

    def createElementNS(self, doc, uri, localName):
        """Create an element, prefixing its name unless it lives in the
        default namespace.
        @type uri: str
        @type localName: str"""
        if uri == self.default_ns:
            qname = localName
        else:
            qname = '%s:%s' % (self.get(uri), localName)
        return doc.createElementNS(uri, qname)
|
afaheem88/rally | refs/heads/master | rally/plugins/openstack/context/ceilometer/samples.py | 6 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common.i18n import _
from rally.common import log as logging
from rally.common import utils as rutils
from rally import consts
from rally.plugins.openstack.scenarios.ceilometer import utils as ceilo_utils
from rally.task import context
LOG = logging.getLogger(__name__)
@context.configure(name="ceilometer", order=450)
class CeilometerSampleGenerator(context.Context):
    """Context for creating samples and collecting resources for benchmarks."""

    # JSON schema validating this context's configuration block. The four
    # counter_* fields are mandatory; the two *_per_* counts default below.
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "counter_name": {
                "type": "string"
            },
            "counter_type": {
                "type": "string"
            },
            "counter_unit": {
                "type": "string"
            },
            "counter_volume": {
                "type": "number",
                "minimum": 0
            },
            "resources_per_tenant": {
                "type": "integer",
                "minimum": 1
            },
            "samples_per_resource": {
                "type": "integer",
                "minimum": 1
            },
        },
        "required": ["counter_name", "counter_type", "counter_unit",
                     "counter_volume"],
        "additionalProperties": False
    }

    # Defaults merged into self.config for the optional fields above.
    DEFAULT_CONFIG = {
        "resources_per_tenant": 5,
        "samples_per_resource": 5
    }

    @rutils.log_task_wrapper(LOG.info, _("Enter context: `Ceilometer`"))
    def setup(self):
        """Create ceilometer samples for every tenant and record them
        (and their resource ids) in the benchmark context."""
        counter_name = self.config["counter_name"]
        counter_type = self.config["counter_type"]
        counter_unit = self.config["counter_unit"]
        counter_volume = self.config["counter_volume"]
        for user, tenant_id in rutils.iterate_per_tenants(
                self.context["users"]):
            self.context["tenants"][tenant_id]["samples"] = []
            self.context["tenants"][tenant_id]["resources"] = []
            scenario = ceilo_utils.CeilometerScenario({"user": user})
            # resources_per_tenant * samples_per_resource samples in total.
            for i in range(self.config["resources_per_tenant"]):
                for j in range(self.config["samples_per_resource"]):
                    sample = scenario._create_sample(counter_name,
                                                     counter_type,
                                                     counter_unit,
                                                     counter_volume)
                    self.context["tenants"][tenant_id]["samples"].append(
                        sample[0].to_dict())
                    # NOTE(review): a resource id is appended once per
                    # *sample*, not once per resource, so "resources" holds
                    # one entry per sample. Whether those ids repeat depends
                    # on _create_sample (not visible here) -- confirm this
                    # is the intended bookkeeping.
                    self.context["tenants"][tenant_id]["resources"].append(
                        sample[0].resource_id)

    @rutils.log_task_wrapper(LOG.info, _("Exit context: `Ceilometer`"))
    def cleanup(self):
        # We don't have API for removal of samples and resources
        pass
|
yinquan529/platform-external-chromium_org | refs/heads/master | tools/telemetry/telemetry/core/browser_options.py | 23 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import logging
import optparse
import os
import shlex
import sys
from telemetry.core import browser_finder
from telemetry.core import profile_types
from telemetry.core import repeat_options
from telemetry.core import util
from telemetry.core import wpr_modes
from telemetry.core.platform.profiler import profiler_finder
util.AddDirToPythonPath(
util.GetChromiumSrcDir(), 'third_party', 'webpagereplay')
import net_configs # pylint: disable=F0401
class BrowserFinderOptions(optparse.Values):
  """Options to be used for discovering a browser.

  Also owns a BrowserOptions and a RepeatOptions instance, and builds the
  optparse parser that populates all three from the command line.
  NOTE: this file is Python 2 (see the `except ..., ex` syntax below).
  """

  def __init__(self, browser_type=None):
    optparse.Values.__init__(self)
    self.browser_type = browser_type
    self.browser_executable = None
    self.chrome_root = None
    self.android_device = None
    self.cros_ssh_identity = None

    self.extensions_to_load = []

    # If set, copy the generated profile to this path on exit.
    self.output_profile_path = None

    self.cros_remote = None

    self.profiler = None
    self.verbosity = 0

    self.page_filter = None
    self.page_filter_exclude = None
    self.page_label_filter = None
    self.page_label_filter_exclude = None

    self.repeat_options = repeat_options.RepeatOptions()
    self.browser_options = BrowserOptions()
    self.output_file = None

    self.skip_navigate_on_repeat = False
    self.android_rndis = False

  def Copy(self):
    """Return an independent deep copy of these options."""
    return copy.deepcopy(self)

  def CreateParser(self, *args, **kwargs):
    """Build an OptionParser wired to populate this object.

    The returned parser's parse_args is monkey-patched (see ParseArgs
    below) so that parsing writes results into self and performs
    post-parse fixups (logging level, 'list' handling, sub-options).
    """
    parser = optparse.OptionParser(*args, **kwargs)

    # Selection group
    group = optparse.OptionGroup(parser, 'Which browser to use')
    group.add_option('--browser',
        dest='browser_type',
        default=None,
        help='Browser type to run, '
             'in order of priority. Supported values: list,%s' %
             ','.join(browser_finder.ALL_BROWSER_TYPES))
    group.add_option('--browser-executable',
        dest='browser_executable',
        help='The exact browser to run.')
    group.add_option('--chrome-root',
        dest='chrome_root',
        help='Where to look for chrome builds.'
             'Defaults to searching parent dirs by default.')
    # NOTE(review): "devcies" in the help string below is a typo for
    # "devices" (user-visible text; left unchanged in this doc pass).
    group.add_option('--device',
        dest='android_device',
        help='The android device ID to use'
             'If not specified, only 0 or 1 connected devcies are supported.')
    group.add_option(
        '--remote',
        dest='cros_remote',
        help='The IP address of a remote ChromeOS device to use.')
    # Default the ssh identity to Chromite's well-known testing key when
    # it is present in the checkout.
    identity = None
    testing_rsa = os.path.join(
        util.GetChromiumSrcDir(),
        'third_party', 'chromite', 'ssh_keys', 'testing_rsa')
    if os.path.exists(testing_rsa):
      identity = testing_rsa
    group.add_option('--identity',
        dest='cros_ssh_identity',
        default=identity,
        help='The identity file to use when ssh\'ing into the ChromeOS device')

    parser.add_option_group(group)

    # Page set options
    group = optparse.OptionGroup(parser, 'Page set options')
    group.add_option('--pageset-shuffle', action='store_true',
        dest='pageset_shuffle',
        help='Shuffle the order of pages within a pageset.')
    group.add_option('--pageset-shuffle-order-file',
        dest='pageset_shuffle_order_file', default=None,
        help='Filename of an output of a previously run test on the current ' +
        'pageset. The tests will run in the same order again, overriding ' +
        'what is specified by --page-repeat and --pageset-repeat.')
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, 'Web Page Replay options')
    group.add_option('--allow-live-sites',
        dest='allow_live_sites', action='store_true',
        help='Run against live sites if the Web Page Replay archives don\'t '
             'exist. Without this flag, the test will just fail instead '
             'of running against live sites.')
    parser.add_option_group(group)

    # Debugging options
    group = optparse.OptionGroup(parser, 'When things go wrong')
    profiler_choices = profiler_finder.GetAllAvailableProfilers()
    group.add_option(
        '--profiler', default=None, type='choice',
        choices=profiler_choices,
        help=('Record profiling data using this tool. Supported values: ' +
              ', '.join(profiler_choices)))
    group.add_option(
        '--interactive', dest='interactive', action='store_true',
        help=('Let the user interact with the page; the actions specified for '
              'the page are not run.'))
    group.add_option(
        '-v', '--verbose', action='count', dest='verbosity',
        help='Increase verbosity level (repeat as needed)')
    group.add_option('--print-bootstrap-deps',
        action='store_true',
        help='Output bootstrap deps list.')
    parser.add_option_group(group)

    # Platform options
    group = optparse.OptionGroup(parser, 'Platform options')
    group.add_option('--no-performance-mode', action='store_true',
        help='Some platforms run on "full performance mode" where the '
        'test is executed at maximum CPU speed in order to minimize noise '
        '(specially important for dashboards / continuous builds). '
        'This option prevents Telemetry from tweaking such platform settings.')
    group.add_option('--android-rndis', dest='android_rndis', default=False,
        action='store_true', help='Use RNDIS forwarding on Android.')
    group.add_option('--no-android-rndis', dest='android_rndis',
        action='store_false', help='Do not use RNDIS forwarding on Android.'
        ' [default]')
    parser.add_option_group(group)

    # Repeat options.
    self.repeat_options.AddCommandLineOptions(parser)

    # Browser options.
    self.browser_options.AddCommandLineOptions(parser)

    real_parse = parser.parse_args
    def ParseArgs(args=None):
      # Seed self with parser defaults, but never clobber values that
      # were already set on this object before parsing.
      defaults = parser.get_default_values()
      for k, v in defaults.__dict__.items():
        if k in self.__dict__ and self.__dict__[k] != None:
          continue
        self.__dict__[k] = v
      # Parse directly into self (it is an optparse.Values subclass).
      ret = real_parse(args, self) # pylint: disable=E1121

      # Map -v count to a logging level.
      if self.verbosity >= 2:
        logging.getLogger().setLevel(logging.DEBUG)
      elif self.verbosity:
        logging.getLogger().setLevel(logging.INFO)
      else:
        logging.getLogger().setLevel(logging.WARNING)

      if self.browser_executable and not self.browser_type:
        self.browser_type = 'exact'
      # '--browser list' prints the available browsers and exits.
      if self.browser_type == 'list':
        try:
          types = browser_finder.GetAllAvailableBrowserTypes(self)
        except browser_finder.BrowserFinderException, ex:
          sys.stderr.write('ERROR: ' + str(ex))
          sys.exit(1)
        sys.stdout.write('Available browsers:\n')
        sys.stdout.write('  %s\n' % '\n  '.join(types))
        sys.exit(0)

      # Parse repeat options.
      self.repeat_options.UpdateFromParseResults(self, parser)

      # Parse browser options.
      self.browser_options.UpdateFromParseResults(self)

      return ret
    parser.parse_args = ParseArgs
    return parser

  def AppendExtraBrowserArgs(self, args):
    """Forward extra browser command-line args to the BrowserOptions."""
    self.browser_options.AppendExtraBrowserArgs(args)

  def MergeDefaultValues(self, defaults):
    """Copy values from ``defaults`` for every attribute not already set."""
    for k, v in defaults.__dict__.items():
      self.ensure_value(k, v)
class BrowserOptions(object):
  """Options to be used for launching a browser."""

  def __init__(self):
    self.browser_type = None
    self.show_stdout = False

    # When set to True, the browser will use the default profile.  Telemetry
    # will not provide an alternate profile directory.
    self.dont_override_profile = False
    self.profile_dir = None
    self.profile_type = None
    self._extra_browser_args = set()
    self.extra_wpr_args = []
    self.wpr_mode = wpr_modes.WPR_OFF
    self.netsim = None

    self.no_proxy_server = False
    self.browser_user_agent_type = None

    # NOTE(review): "sytem" is a typo for "system", but this attribute name
    # is public API -- renaming it would break external callers.
    self.clear_sytem_cache_for_browser_and_profile_on_start = False
    self.startup_url = None

    # Background pages of built-in component extensions can interfere with
    # performance measurements.
    self.disable_component_extensions_with_background_pages = True

  def AddCommandLineOptions(self, parser):
    """Register browser-related optparse options on ``parser``."""
    group = optparse.OptionGroup(parser, 'Browser options')
    profile_choices = profile_types.GetProfileTypes()
    group.add_option('--profile-type',
        dest='profile_type',
        type='choice',
        default='clean',
        choices=profile_choices,
        help=('The user profile to use. A clean profile is used by default. '
              'Supported values: ' + ', '.join(profile_choices)))
    group.add_option('--profile-dir',
        dest='profile_dir',
        help='Profile directory to launch the browser with. '
             'A clean profile is used by default')
    group.add_option('--extra-browser-args',
        dest='extra_browser_args_as_string',
        help='Additional arguments to pass to the browser when it starts')
    group.add_option('--extra-wpr-args',
        dest='extra_wpr_args_as_string',
        help=('Additional arguments to pass to Web Page Replay. '
              'See third_party/webpagereplay/replay.py for usage.'))
    group.add_option('--netsim', default=None, type='choice',
        choices=net_configs.NET_CONFIG_NAMES,
        help=('Run benchmark under simulated network conditions. '
              'Will prompt for sudo. Supported values: ' +
              ', '.join(net_configs.NET_CONFIG_NAMES)))
    group.add_option('--show-stdout',
        action='store_true',
        help='When possible, will display the stdout of the process')
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, 'Compatibility options')
    group.add_option('--gtest_output',
        help='Ignored argument for compatibility with runtest.py harness')
    parser.add_option_group(group)

  def UpdateFromParseResults(self, finder_options):
    """Copies our options from finder_options"""
    # Move the parsed values that belong to this object off of
    # finder_options so they live in exactly one place.
    browser_options_list = [
        'extra_browser_args_as_string',
        'extra_wpr_args_as_string',
        'netsim',
        'profile_dir',
        'profile_type',
        'show_stdout',
        ]
    for o in browser_options_list:
      a = getattr(finder_options, o, None)
      if a is not None:
        setattr(self, o, a)
        delattr(finder_options, o)

    self.browser_type = finder_options.browser_type

    # The *_as_string forms are raw command-line text; split them
    # shell-style and fold them into the list/set representations.
    if hasattr(self, 'extra_browser_args_as_string'): # pylint: disable=E1101
      tmp = shlex.split(
        self.extra_browser_args_as_string) # pylint: disable=E1101
      self.AppendExtraBrowserArgs(tmp)
      delattr(self, 'extra_browser_args_as_string')
    if hasattr(self, 'extra_wpr_args_as_string'): # pylint: disable=E1101
      tmp = shlex.split(
        self.extra_wpr_args_as_string) # pylint: disable=E1101
      self.extra_wpr_args.extend(tmp)
      delattr(self, 'extra_wpr_args_as_string')
    if self.profile_type == 'default':
      self.dont_override_profile = True

    # --profile-dir and a non-clean --profile-type are mutually exclusive.
    if self.profile_dir and self.profile_type != 'clean':
      raise Exception("It's illegal to specify both --profile-type and"
                      " --profile-dir.")

    if self.profile_dir and not os.path.isdir(self.profile_dir):
      raise Exception("Directory specified by --profile-dir (%s) doesn't"
                      " exist or isn't a directory." % (self.profile_dir))

    if not self.profile_dir:
      self.profile_dir = profile_types.GetProfileDir(self.profile_type)

    # This deferred import is necessary because browser_options is imported in
    # telemetry/telemetry/__init__.py.
    from telemetry.core.backends.chrome import chrome_browser_options
    finder_options.browser_options = (
        chrome_browser_options.CreateChromeBrowserOptions(self))

  @property
  def extra_browser_args(self):
    # Exposed read-only; mutate via AppendExtraBrowserArgs.
    return self._extra_browser_args

  def AppendExtraBrowserArgs(self, args):
    """Add one extra browser argument (str) or several (list)."""
    if isinstance(args, list):
      self._extra_browser_args.update(args)
    else:
      self._extra_browser_args.add(args)
|
jerradgenson/infinium | refs/heads/master | lib/ui/config.py | 1 | """
An API for interfacing with the Infinium configuration file. The only public
component is ``get_config``, which returns a ``configuration`` object used to
access and update config file fields. There is only one ``configuration``
object, because it is meant to be shared between all parts of the program to
keep configuration options in sync.
The name of the configuration file is '{}'. The configuration loader first
searches for it in the current working directory, then in the location
specified by the '{}' environment variable, and finally in the root of the
Infinium installation directory, extracted from argv[0].
This module is thread safe.
Copyright 2014, 2015 Jerrad M. Genson
This file is part of Infinium.
Infinium is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Infinium is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Infinium. If not, see <http://www.gnu.org/licenses/>.
"""
# Python standard library imports.
import threading
from pathlib import Path
from os import getenv
# Third-party library imports.
from yaml import load, dump
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
# Infinium library imports.
from lib import data
# Module header.
__maintainer__ = data.Developer.JERRAD_GENSON
__contact__ = data.Developer.EMAIL[__maintainer__]
__doc__ = __doc__.format(data.CONFIG_FILE_NAME,
data.CONFIG_VAR,
data.INSTALL_PATH)
def memoize(func):
    """
    To be used as a decorator.

    Memoize the result of a function call that takes 0 arguments.

    This is an unusual thing to do, but we need a way to deal with various
    pieces and threads of Infinium reading and writing to the configuration
    file, which is unavoidably global state. It would be nice to avoid using
    global variables or a singleton, both of which can be abused and result in
    an unintuitive API, and one way to accomplish that is by utilizing
    memoization instead.

    Note: ``func`` is called exactly once, at decoration time; every call of
    the returned function hands back that same object.

    Args:
      func: Any function that takes 0 arguments and has no side-effects.

    Returns:
      A new function that always returns the same object.
    """
    # Local import keeps the module's import block untouched.
    from functools import wraps

    # Evaluate once, up front; the wrapper only replays the result.
    return_object = func()

    # functools.wraps preserves __name__, __module__, __qualname__ and
    # __doc__ -- the original hand-rolled version only copied __doc__.
    @wraps(func)
    def new_func():
        return return_object

    return new_func
@memoize
def get_config():
    """
    Get Infinium Configuration object. The Configuration object is the
    interface to the Infinium configuration file. It has getter and setter
    properties for every configuration field. Configuration properties are
    given a similar name to their field in the configuration file. These can be
    listed by calling ``help`` on the Configuration object. The Configuration
    object is memoized so that only one instance of Configuration is created,
    and shared between callers and threads.

    Returns
      The Configuration object.

    Raises
      Any exception that ``Path.open`` and ``yaml.load`` may raise, as well as
      ``ConfigFileNotFoundError``.
    """
    # Single lock guarding all reads/writes of the (memoized, shared)
    # configuration mapping and its backing file.
    thread_lock = threading.Lock()

    class Configuration:
        def __init__(self):
            # Candidate config file locations, checked in priority order.
            cwd_config_path = Path(data.CONFIG_FILE_NAME)
            environ_config_dir = Path(getenv(data.CONFIG_VAR,
                                             data.DEFAULT_CONFIG_PATH))
            environ_config_path = environ_config_dir / data.CONFIG_FILE_NAME
            install_config_path = data.INSTALL_PATH / data.CONFIG_FILE_NAME

            # First look for config file in current working directory.
            if cwd_config_path.exists():
                config_path = Path(data.CONFIG_FILE_NAME)

            # Next, look for config file at config environment variable.
            elif environ_config_path.exists():
                config_path = environ_config_path

            # Finally try looking for config file in installation directory.
            elif install_config_path.exists():
                config_path = install_config_path

            # Config file could not be found.
            else:
                msg = 'The configuration file could not be found.'
                raise ConfigFileNotFoundError(msg)

            with config_path.open() as config_file:
                self.__configuration = load(config_file, Loader)

            self.__config_path = config_path

        def __update_field(self, section, field, new_value):
            """Set ``[section][field]`` to ``new_value`` and persist the
            whole configuration to disk, rolling back on failure."""
            new_value = str(new_value)

            # ``with`` guarantees the lock is released even if the lookup
            # of the old value raises (the previous acquire()/release()
            # pattern leaked the lock on a KeyError here).
            with thread_lock:
                old_value = self.__configuration[section][field]
                self.__configuration[section][field] = new_value
                try:
                    with self.__config_path.open('w') as config_file:
                        dump(self.__configuration,
                             config_file,
                             Dumper=Dumper,
                             default_flow_style=False)
                except Exception:
                    # BUG FIX: this previously assigned to
                    # self.__configuration[field], which created a stray
                    # top-level key instead of restoring the nested value.
                    self.__configuration[section][field] = old_value
                    raise

        def __get_field(self, section, field):
            """Return ``[section][field]``; a missing key means the file
            is corrupt (raises ConfigFileCorruptError)."""
            field = str(field)
            with thread_lock:
                try:
                    return self.__configuration[section][field]
                except KeyError:
                    self.__handle_key_error(section, field)

        def __handle_key_error(self, section, field_name):
            msg = 'Config file section "{}" field "{}" missing from config file "{}".'
            msg = msg.format(section, field_name, self.config_path)
            raise ConfigFileCorruptError(msg)

        ## general section ##
        @property
        def config_path(self):
            # Path the configuration was actually loaded from.
            return self.__config_path

        @property
        def model_path(self):
            return self.__get_field('general', 'model_path')

        @model_path.setter
        def model_path(self, value):
            self.__update_field('general', 'model_path', value)

        @property
        def log_path(self):
            return self.__get_field('general', 'log_path')

        @log_path.setter
        def log_path(self, value):
            self.__update_field('general', 'log_path', value)

        @property
        def verbose(self):
            return bool(self.__get_field('general', 'verbose'))

        @property
        def debug(self):
            return bool(self.__get_field('general', 'debug'))

        ## sgd_classifier section ##
        @property
        def sgd_loss(self):
            return self.__get_field('sgd_classifier', 'loss')

        @property
        def sgd_penalty(self):
            return self.__get_field('sgd_classifier', 'penalty')

        @property
        def sgd_alpha(self):
            return float(self.__get_field('sgd_classifier', 'alpha'))

        @property
        def sgd_l1_ratio(self):
            return float(self.__get_field('sgd_classifier', 'l1_ratio'))

        @property
        def sgd_fit_intercept(self):
            return bool(self.__get_field('sgd_classifier', 'fit_intercept'))

        @property
        def sgd_n_iter(self):
            return int(self.__get_field('sgd_classifier', 'n_iter'))

        @property
        def sgd_shuffle(self):
            return bool(self.__get_field('sgd_classifier', 'shuffle'))

        @property
        def sgd_verbose(self):
            return bool(self.__get_field('sgd_classifier', 'verbose'))

        @property
        def sgd_n_jobs(self):
            return int(self.__get_field('sgd_classifier', 'n_jobs'))

        @property
        def sgd_learning_rate(self):
            return self.__get_field('sgd_classifier', 'learning_rate')

        @property
        def sgd_eta0(self):
            return float(self.__get_field('sgd_classifier', 'eta0'))

        @property
        def sgd_power_t(self):
            return float(self.__get_field('sgd_classifier', 'power_t'))

        ## database section ##
        @property
        def db_dialect(self):
            return self.__get_field('database', 'dialect')

        @property
        def db_driver(self):
            return self.__get_field('database', 'driver')

        @property
        def db_username(self):
            return self.__get_field('database', 'username')

        @property
        def db_password(self):
            return self.__get_field('database', 'password')

        @property
        def db_host(self):
            return self.__get_field('database', 'host')

        @property
        def db_port(self):
            return self.__get_field('database', 'port')

        @property
        def db_database(self):
            return self.__get_field('database', 'database')

        @property
        def db_echo(self):
            return bool(self.__get_field('database', 'echo'))

    return Configuration()
class ConfigurationError(Exception):
    """Base class for every error raised by the ``config`` module.

    Indicates a general problem with the configuration object; more
    specific failures are signalled by its subclasses.
    """
class ConfigFileNotFoundError(ConfigurationError):
    """Raised when the Infinium configuration file is absent from every
    location that the configuration loader searches.
    """
class ConfigFileCorruptError(ConfigurationError):
    """Raised when the configuration file exists but is missing expected
    content -- for example, the name of a field was inadvertently changed.
    """
boompieman/iim_project | refs/heads/master | project_python2/lib/python2.7/site-packages/pygments/styles/pastie.py | 50 | # -*- coding: utf-8 -*-
"""
pygments.styles.pastie
~~~~~~~~~~~~~~~~~~~~~~
Style similar to the `pastie`_ default style.
.. _pastie: http://pastie.caboo.se/
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class PastieStyle(Style):
    """
    Style similar to the pastie default style.
    """

    # Style applied to tokens with no explicit entry below.
    default_style = ''

    # Token type -> style-definition string (Pygments style syntax:
    # hex colours, 'bold'/'italic'/'nobold', and 'bg:' backgrounds).
    styles = {
        Whitespace:             '#bbbbbb',
        Comment:                '#888888',
        Comment.Preproc:        'bold #cc0000',
        Comment.Special:        'bg:#fff0f0 bold #cc0000',

        String:                 'bg:#fff0f0 #dd2200',
        String.Regex:           'bg:#fff0ff #008800',
        String.Other:           'bg:#f0fff0 #22bb22',
        String.Symbol:          '#aa6600',
        String.Interpol:        '#3333bb',
        String.Escape:          '#0044dd',

        Operator.Word:          '#008800',

        Keyword:                'bold #008800',
        Keyword.Pseudo:         'nobold',
        Keyword.Type:           '#888888',

        Name.Class:             'bold #bb0066',
        Name.Exception:         'bold #bb0066',
        Name.Function:          'bold #0066bb',
        Name.Property:          'bold #336699',
        Name.Namespace:         'bold #bb0066',
        Name.Builtin:           '#003388',
        Name.Variable:          '#336699',
        Name.Variable.Class:    '#336699',
        Name.Variable.Instance: '#3333bb',
        Name.Variable.Global:   '#dd7700',
        Name.Constant:          'bold #003366',
        Name.Tag:               'bold #bb0066',
        Name.Attribute:         '#336699',
        Name.Decorator:         '#555555',
        Name.Label:             'italic #336699',

        Number:                 'bold #0000DD',

        Generic.Heading:        '#333',
        Generic.Subheading:     '#666',
        Generic.Deleted:        'bg:#ffdddd #000000',
        Generic.Inserted:       'bg:#ddffdd #000000',
        Generic.Error:          '#aa0000',
        Generic.Emph:           'italic',
        Generic.Strong:         'bold',
        Generic.Prompt:         '#555555',
        Generic.Output:         '#888888',
        Generic.Traceback:      '#aa0000',

        Error:                  'bg:#e3d2d2 #a61717'
    }
|
watkinsr/.dotfiles | refs/heads/master | emacs/anaconda-mode/0.1.6/jedi-0.9.0-py2.7.egg/jedi/evaluate/iterable.py | 33 | """
Contains all classes and functions to deal with lists, dicts, generators and
iterators in general.
Array modifications
*******************
If the content of an array (``set``/``list``) is requested somewhere, the
current module will be checked for appearances of ``arr.append``,
``arr.insert``, etc. If the ``arr`` name points to an actual array, the
content will be added
This can be really cpu intensive, as you can imagine. Because |jedi| has to
follow **every** ``append`` and check whether it's the right array. However this
works pretty well, because in *slow* cases, the recursion detector and other
settings will stop this process.
It is important to note that:
1. Array modfications work only in the current module.
2. Jedi only checks Array additions; ``list.pop``, etc are ignored.
"""
from itertools import chain
from jedi import common
from jedi import debug
from jedi import settings
from jedi._compatibility import use_metaclass, is_py3, unicode
from jedi.parser import tree
from jedi.evaluate import compiled
from jedi.evaluate import helpers
from jedi.evaluate.cache import CachedMetaClass, memoize_default
from jedi.evaluate import analysis
def unite(iterable):
    """Turns a two dimensional array into a one dimensional."""
    return [item for sub in iterable for item in sub]
class IterableWrapper(tree.Base):
    """Common base for the fake iterable wrappers in this module."""

    def is_class(self):
        # Wrappers stand in for instances, never for classes.
        return False
class GeneratorMixin(object):
    """Generator-like behaviour shared by Generator and
    GeneratorComprehension.

    Host classes must provide ``iter_content()`` and an ``_evaluator``
    attribute (both are used below).
    """

    @memoize_default()
    def names_dicts(self, search_global=False):  # is always False
        # Expose the names of the compiled generator object, but swap the
        # methods that would actually run the generator (__next__/send/next)
        # for GeneratorMethod wrappers bound to this fake generator.
        dct = {}
        executes_generator = '__next__', 'send', 'next'
        for names in compiled.generator_obj.names_dict.values():
            for name in names:
                if name.value in executes_generator:
                    parent = GeneratorMethod(self, name.parent)
                    dct[name.value] = [helpers.FakeName(name.name, parent, is_definition=True)]
                else:
                    dct[name.value] = [name]
        yield dct

    def get_index_types(self, evaluator, index_array):
        # Generators cannot be subscripted; record a type error instead.
        # NOTE(review): the ``evaluator`` parameter is ignored in favour of
        # ``self._evaluator`` -- confirm they are always the same object.
        #debug.warning('Tried to get array access on a generator: %s', self)
        analysis.add(self._evaluator, 'type-error-generator', index_array)
        return []

    def get_exact_index_types(self, index):
        """
        Exact lookups are used for tuple lookups, which are perfectly fine if
        used with generators.
        """
        return [self.iter_content()[index]]

    def py__bool__(self):
        # A generator object is always truthy.
        return True
class Generator(use_metaclass(CachedMetaClass, IterableWrapper, GeneratorMixin)):
    """Handling of `yield` functions."""

    def __init__(self, evaluator, func, var_args):
        super(Generator, self).__init__()
        self._evaluator = evaluator
        self.func = func          # the function containing the yield(s)
        self.var_args = var_args  # arguments the generator was created with

    def iter_content(self):
        """ returns the content of __iter__ """
        # Directly execute it, because with a normal call to py__call__ a
        # Generator will be returned.
        from jedi.evaluate.representation import FunctionExecution
        f = FunctionExecution(self._evaluator, self.func, self.var_args)
        return f.get_return_types(check_yields=True)

    def __getattr__(self, name):
        # Forward a fixed whitelist of attributes to the wrapped function;
        # any other access is a programming error and raises loudly.
        if name not in ['start_pos', 'end_pos', 'parent', 'get_imports',
                        'doc', 'docstr', 'get_parent_until',
                        'get_code', 'subscopes']:
            raise AttributeError("Accessing %s of %s is not allowed."
                                 % (self, name))
        return getattr(self.func, name)

    def __repr__(self):
        return "<%s of %s>" % (type(self).__name__, self.func)
class GeneratorMethod(IterableWrapper):
    """``__next__`` and ``send`` methods."""

    def __init__(self, generator, builtin_func):
        # builtin_func is the compiled generator method being wrapped;
        # generator is the fake generator whose content a call yields.
        self._builtin_func = builtin_func
        self._generator = generator

    def py__call__(self, evaluator, params):
        # Calling next()/send() produces the generator's values.
        # TODO add TypeError if params are given.
        return self._generator.iter_content()

    def __getattr__(self, name):
        # Everything else behaves like the wrapped builtin method.
        return getattr(self._builtin_func, name)
class Comprehension(IterableWrapper):
    """Base class for list/generator comprehension wrappers."""

    @staticmethod
    def from_atom(evaluator, atom):
        # The opening bracket of the atom decides the comprehension kind.
        mapping = {
            '(': GeneratorComprehension,
            '[': ListComprehension
        }
        return mapping[atom.children[0]](evaluator, atom)

    def __init__(self, evaluator, atom):
        self._evaluator = evaluator
        self._atom = atom  # the parser node for the whole comprehension

    @memoize_default()
    def eval_node(self):
        """
        The first part `x + 1` of the list comprehension:

            [x + 1 for x in foo]
        """
        comprehension = self._atom.children[1]
        # For nested comprehensions we need to search the last one.
        last = comprehension.children[-1]
        last_comp = comprehension.children[1]
        while True:
            if isinstance(last, tree.CompFor):
                last_comp = last
            elif not tree.is_node(last, 'comp_if'):
                break
            last = last.children[-1]

        # Copy the result expression and re-parent it under the innermost
        # CompFor so name resolution sees the loop variables.
        return helpers.deep_ast_copy(comprehension.children[0], parent=last_comp)

    def get_exact_index_types(self, index):
        return [self._evaluator.eval_element(self.eval_node())[index]]

    def __repr__(self):
        # NOTE(review): the "e" prefix ("<eListComprehension ...>") looks
        # deliberate but undocumented -- confirm before changing.
        return "<e%s of %s>" % (type(self).__name__, self._atom)
class ArrayMixin(object):
    """Name lookup shared by array-like wrappers; host classes must
    provide ``_evaluator`` and a ``type`` string."""

    @memoize_default()
    def names_dicts(self, search_global=False):  # Always False.
        # `array.type` is a string with the type, e.g. 'list'.
        scope = self._evaluator.find_types(compiled.builtin, self.type)[0]
        # builtins only have one class -> [0]
        scope = self._evaluator.execute(scope, (AlreadyEvaluated((self,)),))[0]
        return scope.names_dicts(search_global)

    def py__bool__(self):
        return None  # We don't know the length, because of appends.
class ListComprehension(Comprehension, ArrayMixin):
    type = 'list'

    def get_index_types(self, evaluator, index):
        # Any subscript of a list comprehension yields its content types.
        return self.iter_content()

    def iter_content(self):
        return self._evaluator.eval_element(self.eval_node())

    @property
    def name(self):
        # Borrow the 'list' name from an empty fake sequence.
        return FakeSequence(self._evaluator, [], 'list').name
class GeneratorComprehension(Comprehension, GeneratorMixin):
    # Behaves like a generator (GeneratorMixin) whose values come from
    # the comprehension's result expression.
    def iter_content(self):
        return self._evaluator.eval_element(self.eval_node())
class Array(IterableWrapper, ArrayMixin):
    # Opening bracket of the literal -> builtin type name.
    mapping = {'(': 'tuple',
               '[': 'list',
               '{': 'dict'}

    def __init__(self, evaluator, atom):
        self._evaluator = evaluator
        self.atom = atom  # the parser node of the literal
        self.type = Array.mapping[atom.children[0]]
        """The builtin name of the array (list, set, tuple or dict)."""

        # A '{...}' literal without any ':' separators is a set, not a dict.
        c = self.atom.children
        array_node = c[1]
        if self.type == 'dict' and array_node != '}' \
                and (not hasattr(array_node, 'children')
                     or ':' not in array_node.children):
            self.type = 'set'

    @property
    def name(self):
        return helpers.FakeName(self.type, parent=self)

    @memoize_default()
    def get_index_types(self, evaluator, index=()):
        """
        Get the types of a specific index or all, if not given.

        :param index: A subscriptlist node (or subnode).
        """
        indexes = create_indexes_or_slices(evaluator, index)
        lookup_done = False
        types = []
        for index in indexes:
            if isinstance(index, Slice):
                # Slicing an array yields the same kind of array.
                types += [self]
                lookup_done = True
            elif isinstance(index, compiled.CompiledObject) \
                    and isinstance(index.obj, (int, str, unicode)):
                # A concrete int/str index: try an exact lookup, ignoring
                # out-of-range/missing-key/unhashable failures.
                with common.ignored(KeyError, IndexError, TypeError):
                    types += self.get_exact_index_types(index.obj)
                    lookup_done = True

        # Fall back to "all contained values" when no exact lookup worked.
        return types if lookup_done else self.values()

    @memoize_default()
    def values(self):
        # All value types, plus anything added later via append/insert etc.
        result = unite(self._evaluator.eval_element(v) for v in self._values())
        result += check_array_additions(self._evaluator, self)
        return result

    def get_exact_index_types(self, mixed_index):
        """ Here the index is an int/str. Raises IndexError/KeyError """
        if self.type == 'dict':
            for key, values in self._items():
                # Because we only want the key to be a string.
                keys = self._evaluator.eval_element(key)

                for k in keys:
                    if isinstance(k, compiled.CompiledObject) \
                            and mixed_index == k.obj:
                        for value in values:
                            return self._evaluator.eval_element(value)
            raise KeyError('No key found in dictionary %s.' % self)

        # Can raise an IndexError
        return self._evaluator.eval_element(self._items()[mixed_index])

    def iter_content(self):
        return self.values()

    @common.safe_property
    def parent(self):
        return compiled.builtin

    def get_parent_until(self):
        return compiled.builtin

    def __getattr__(self, name):
        # Forward only a known-safe set of attributes to the parser node.
        if name not in ['start_pos', 'get_only_subelement', 'parent',
                        'get_parent_until', 'items']:
            raise AttributeError('Strange access on %s: %s.' % (self, name))
        return getattr(self.atom, name)

    def _values(self):
        """Returns a list of a list of node."""
        if self.type == 'dict':
            # Flatten the per-key value lists produced by _items().
            return list(chain.from_iterable(v for k, v in self._items()))
        else:
            return self._items()

    def _items(self):
        c = self.atom.children
        array_node = c[1]
        if array_node in (']', '}', ')'):
            return []  # Direct closing bracket, doesn't contain items.

        if tree.is_node(array_node, 'testlist_comp'):
            # Comma-separated items: every second child is a node.
            return array_node.children[::2]
        elif tree.is_node(array_node, 'dictorsetmaker'):
            # Walk "key [: value] ," triples, collecting (key, [value])
            # pairs for dicts and bare keys for sets.
            kv = []
            iterator = iter(array_node.children)
            for key in iterator:
                op = next(iterator, None)
                if op is None or op == ',':
                    kv.append(key)  # A set.
                elif op == ':':  # A dict.
                    kv.append((key, [next(iterator)]))
                    next(iterator, None)  # Possible comma.
                else:
                    raise NotImplementedError('dict/set comprehensions')
            return kv
        else:
            # Single-element literal.
            return [array_node]

    def __iter__(self):
        return iter(self._items())

    def __repr__(self):
        return "<%s of %s>" % (type(self).__name__, self.atom)
class _FakeArray(Array):
    """Base for arrays created by Jedi itself rather than parsed code."""
    def __init__(self, evaluator, container, type):
        # Deliberately does NOT call Array.__init__: ``container`` need not
        # be a parser atom, so no bracket inspection happens here.
        self.type = type
        self._evaluator = evaluator
        self.atom = container
class ImplicitTuple(_FakeArray):
    """A tuple without parentheses, e.g. the ``1, 2`` in ``x = 1, 2``."""
    def __init__(self, evaluator, testlist):
        super(ImplicitTuple, self).__init__(evaluator, testlist, 'tuple')
        self._testlist = testlist
    def _items(self):
        # Every second child of a testlist node is a ``,`` token.
        return self._testlist.children[::2]
class FakeSequence(_FakeArray):
    """A list/tuple built directly from already known value nodes."""
    def __init__(self, evaluator, sequence_values, type):
        super(FakeSequence, self).__init__(evaluator, sequence_values, type)
        self._sequence_values = sequence_values
    def _items(self):
        return self._sequence_values
    def get_exact_index_types(self, index):
        # May raise IndexError, just like a real sequence lookup.
        value = self._sequence_values[index]
        return self._evaluator.eval_element(value)
class AlreadyEvaluated(frozenset):
    """Container marking objects that were already evaluated.

    Used to smuggle evaluated objects into an array without Jedi trying
    to evaluate them again.
    """

    def get_code(self):
        """Return a string representation, for debugging purposes only."""
        return '%s' % (self,)
class MergedNodes(frozenset):
    # Marker ``frozenset`` subclass: adds no behavior of its own and is
    # distinguishable only by type.  NOTE(review): presumably holds tree
    # nodes that should be evaluated together -- confirm against callers.
    pass
class FakeDict(_FakeArray):
    """A dict built from a mapping of index -> list of value nodes."""
    def __init__(self, evaluator, dct):
        super(FakeDict, self).__init__(evaluator, dct, 'dict')
        self._dct = dct
    def get_exact_index_types(self, index):
        # May raise KeyError, just like a real dict lookup.  All value
        # nodes for the key are evaluated and flattened into one list.
        return list(chain.from_iterable(self._evaluator.eval_element(v)
                                        for v in self._dct[index]))
    def _items(self):
        return self._dct.items()
class MergedArray(_FakeArray):
    """Presents several arrays as one combined array.

    The reported ``type`` is taken from the last array in the sequence.
    """
    def __init__(self, evaluator, arrays):
        super(MergedArray, self).__init__(evaluator, arrays, arrays[-1].type)
        self._arrays = arrays
    def get_exact_index_types(self, mixed_index):
        # An exact index is ambiguous across several merged arrays.
        raise IndexError
    def values(self):
        # ``chain.from_iterable`` consumes the generator lazily;
        # ``chain(*gen)`` would first unpack it all into call arguments.
        return list(chain.from_iterable(a.values() for a in self._arrays))
    def __iter__(self):
        # Yield every element of every merged array, in order.
        for array in self._arrays:
            for a in array:
                yield a
    def __len__(self):
        return sum(len(a) for a in self._arrays)
def get_iterator_types(inputs):
    """Returns the types of any iterator (arrays, yields, __iter__, etc)."""
    iterators = []
    # Take the first statement (for has always only
    # one, remember `in`). And follow it.
    for it in inputs:
        if isinstance(it, (Generator, Array, ArrayInstance, Comprehension)):
            # Jedi's own iterable wrappers can be followed directly.
            iterators.append(it)
        else:
            if not hasattr(it, 'execute_subscope_by_name'):
                debug.warning('iterator/for loop input wrong: %s', it)
                continue
            try:
                # Arbitrary objects: follow their ``__iter__`` result.
                iterators += it.execute_subscope_by_name('__iter__')
            except KeyError:
                debug.warning('iterators: No __iter__ method found.')
    result = []
    # Local import to avoid a circular module dependency.
    from jedi.evaluate.representation import Instance
    for it in iterators:
        if isinstance(it, Array):
            # Array is a little bit special, since this is an internal array,
            # but there's also the list builtin, which is another thing.
            result += it.values()
        elif isinstance(it, Instance):
            # __iter__ returned an instance.
            name = '__next__' if is_py3 else 'next'
            try:
                result += it.execute_subscope_by_name(name)
            except KeyError:
                debug.warning('Instance has no __next__ function in %s.', it)
        else:
            # TODO this is not correct, __iter__ can return arbitrary input!
            # Is a generator.
            result += it.iter_content()
    return result
def check_array_additions(evaluator, array):
    """ Just a mapper function for the internal _check_array_additions """
    # Dynamic additions only make sense for mutable sequence types.
    if array.type not in ('list', 'set'):
        # TODO also check for dict updates
        return []
    is_list = array.type == 'list'
    try:
        current_module = array.atom.get_parent_until()
    except AttributeError:
        # If there's no get_parent_until, it's a FakeSequence or another Fake
        # type. Those fake types are used inside Jedi's engine. No values may
        # be added to those after their creation.
        return []
    return _check_array_additions(evaluator, array, current_module, is_list)
@memoize_default([], evaluator_is_first_arg=True)
def _check_array_additions(evaluator, compare_array, module, is_list):
    """
    Checks if a `Array` has "add" (append, insert, extend) statements:

    >>> a = [""]
    >>> a.append(1)
    """
    if not settings.dynamic_array_additions or isinstance(module, compiled.CompiledObject):
        return []
    def check_additions(arglist, add_name):
        # Evaluate the argument types that an add call contributes.
        params = list(param.Arguments(evaluator, arglist).unpack())
        result = []
        if add_name in ['insert']:
            # ``insert(index, value)``: skip the index argument.
            params = params[1:]
        if add_name in ['append', 'add', 'insert']:
            for key, nodes in params:
                result += unite(evaluator.eval_element(node) for node in nodes)
        elif add_name in ['extend', 'update']:
            # These take iterables; contribute the iterated element types.
            for key, nodes in params:
                iterators = unite(evaluator.eval_element(node) for node in nodes)
                result += get_iterator_types(iterators)
        return result
    # Local import to avoid a circular module dependency.
    from jedi.evaluate import representation as er, param
    def get_execution_parent(element):
        """ Used to get an Instance/FunctionExecution parent """
        if isinstance(element, Array):
            node = element.atom
        else:
            # Is an Instance with an
            # Arguments([AlreadyEvaluated([ArrayInstance])]) inside
            # Yeah... I know... It's complicated ;-)
            node = list(element.var_args.argument_node[0])[0].var_args.trailer
        if isinstance(node, er.InstanceElement):
            return node
        return node.get_parent_until(er.FunctionExecution)
    # Temporarily disable cross-module dynamic params for speed; restored
    # at the bottom of this function.
    temp_param_add, settings.dynamic_params_for_other_modules = \
        settings.dynamic_params_for_other_modules, False
    search_names = ['append', 'extend', 'insert'] if is_list else ['add', 'update']
    comp_arr_parent = get_execution_parent(compare_array)
    added_types = []
    for add_name in search_names:
        try:
            possible_names = module.used_names[add_name]
        except KeyError:
            continue
        else:
            for name in possible_names:
                # Check if the original scope is an execution. If it is, one
                # can search for the same statement, that is in the module
                # dict. Executions are somewhat special in jedi, since they
                # literally copy the contents of a function.
                if isinstance(comp_arr_parent, er.FunctionExecution):
                    if comp_arr_parent.start_pos < name.start_pos < comp_arr_parent.end_pos:
                        name = comp_arr_parent.name_for_position(name.start_pos)
                    else:
                        # Don't check definitions that are not defined in the
                        # same function. This is not "proper" anyway. It also
                        # improves Jedi's speed for array lookups, since we
                        # don't have to check the whole source tree anymore.
                        continue
                # The name must be followed by a real call trailer with at
                # least one argument, i.e. ``name(...)``.
                trailer = name.parent
                power = trailer.parent
                trailer_pos = power.children.index(trailer)
                try:
                    execution_trailer = power.children[trailer_pos + 1]
                except IndexError:
                    continue
                else:
                    if execution_trailer.type != 'trailer' \
                            or execution_trailer.children[0] != '(' \
                            or execution_trailer.children[1] == ')':
                        continue
                power = helpers.call_of_name(name, cut_own_trailer=True)
                # InstanceElements are special, because they don't get copied,
                # but have this wrapper around them.
                if isinstance(comp_arr_parent, er.InstanceElement):
                    power = er.get_instance_el(evaluator, comp_arr_parent.instance, power)
                if evaluator.recursion_detector.push_stmt(power):
                    # Check for recursion. Possible by using 'extend' in
                    # combination with function calls.
                    continue
                if compare_array in evaluator.eval_element(power):
                    # The arrays match. Now add the results
                    added_types += check_additions(execution_trailer.children[1], add_name)
                evaluator.recursion_detector.pop_stmt()
    # reset settings
    settings.dynamic_params_for_other_modules = temp_param_add
    return added_types
def check_array_instances(evaluator, instance):
    """Used for set() and list() instances."""
    if not settings.dynamic_array_additions:
        return instance.var_args
    # Wrap the instance so later evaluation also sees dynamic additions.
    ai = ArrayInstance(evaluator, instance)
    from jedi.evaluate import param
    return param.Arguments(evaluator, [AlreadyEvaluated([ai])])
class ArrayInstance(IterableWrapper):
    """
    Used for the usage of set() and list().
    This is definitely a hack, but a good one :-)
    It makes it possible to use set/list conversions.

    In contrast to Array, ListComprehension and all other iterable types, this
    is something that is only used inside `evaluate/compiled/fake/builtins.py`
    and therefore doesn't need `names_dicts`, `py__bool__` and so on, because
    we don't use these operations in `builtins.py`.
    """
    def __init__(self, evaluator, instance):
        self._evaluator = evaluator
        self.instance = instance
        # The arguments that were passed to the set()/list() call.
        self.var_args = instance.var_args
    def iter_content(self):
        """
        The index is here just ignored, because of all the appends, etc.
        lists/sets are too complicated too handle that.
        """
        items = []
        # Iterate the types of every constructor argument.
        for key, nodes in self.var_args.unpack():
            for node in nodes:
                for typ in self._evaluator.eval_element(node):
                    items += get_iterator_types([typ])
        module = self.var_args.get_parent_until()
        is_list = str(self.instance.name) == 'list'
        # Also pick up values added after construction (append/add/...).
        items += _check_array_additions(self._evaluator, self.instance, module, is_list)
        return items
class Slice(object):
    """Wraps the start/stop/step nodes of an ``a[x:y:z]`` subscript."""
    def __init__(self, evaluator, start, stop, step):
        self._evaluator = evaluator
        # all of them are either a Precedence or None.
        self._start = start
        self._stop = stop
        self._step = step
    @property
    def obj(self):
        """
        Imitate CompiledObject.obj behavior and return a ``builtin.slice()``
        object.
        """
        def get(element):
            # Evaluate one bound to a concrete Python object, or None.
            if element is None:
                return None
            result = self._evaluator.eval_element(element)
            if len(result) != 1:
                # We want slices to be clear defined with just one type.
                # Otherwise we will return an empty slice object.
                raise IndexError
            try:
                return result[0].obj
            except AttributeError:
                return None
        try:
            return slice(get(self._start), get(self._stop), get(self._step))
        except IndexError:
            # Any ambiguous bound degrades to the all-open slice ``[::]``.
            return slice(None, None, None)
def create_indexes_or_slices(evaluator, index):
    """Turn a subscript node into index types or a :class:`Slice`.

    For a slice subscript (``a[x:y:z]``) a one-element tuple holding a
    :class:`Slice` is returned; any other index node is simply evaluated.
    """
    if tree.is_node(index, 'subscript'):  # subscript is a slice operation.
        # Collect up to three bound nodes: (start, stop, step).
        result = []
        for el in index.children:
            if el == ':':
                # A leading ':' means the start bound was omitted.
                if not result:
                    result.append(None)
            elif tree.is_node(el, 'sliceop'):
                if len(el.children) == 2:
                    result.append(el.children[1])
            else:
                result.append(el)
        # Pad missing bounds with None so the tuple is always length 3.
        result += [None] * (3 - len(result))
        return (Slice(evaluator, *result),)
    return evaluator.eval_element(index)
|
lirenjie95/DataMining | refs/heads/master | DataMining-UIUC/DataVisualization/dolphins.py | 2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import string
import networkx
from matplotlib import pyplot as plt
# Build the dolphin social network: node labels come from the GML file,
# edges from the Pajek (.paj) file.
Net = networkx.Graph()
NodeName = {}

# --- Parse node ids and labels from the GML file ------------------------
fid = open('dolphins.gml', 'r')
flag_node = False
for line in fid:
    if flag_node:
        fields = line.rsplit()
        if fields:  # guard against blank lines inside a node block
            if fields[0] == 'id':
                node_x = fields[1]
            elif fields[0] == 'label':
                name_x = fields[1][1:-1]  # strip the surrounding quotes
            elif fields[0] == ']':
                flag_node = False
                NodeName[node_x] = name_x
    # A line consisting of '  node' opens a new node block.
    if line == '  node\n':
        flag_node = True
fid.close()

# --- Parse nodes and edges from the Pajek file --------------------------
fid = open('dolphins.paj', 'r')
flag_edge = False
for line in fid:
    if flag_edge:
        fields = line.rsplit()
        # int() replaces string.atoi(), which was removed in Python 3.
        # Pajek ids are 1-based; the GML ids stored above are 0-based.
        node_x = int(fields[0]) - 1
        node_y = int(fields[1]) - 1
        name_x = NodeName['%d' % node_x]
        name_y = NodeName['%d' % node_y]
        Net.add_edge(name_x, name_y)
        continue
    # Everything after the '*Edges' marker is an edge definition.
    if line == '*Edges\n':
        flag_edge = True
    else:
        # Quoted lines before the marker are node definitions.
        if re.search('"', line):
            fields = line.rsplit()
            name_x = NodeName[fields[1][1:-1]]
            Net.add_node(name_x)
fid.close()

plt.title("Dolphins Social Network", fontsize=20)
networkx.draw_random(Net, node_size=200, fontsize=1, node_color='b',
                     edge_color='g', alpha=0.8, width=0.3)
# NOTE(review): plt.savefig() runs after plt.show() below -- with many
# backends the figure is cleared when show() returns, so the saved image
# may be blank; saving before show() would be safer.
plt.show()
plt.savefig('dolphins.png') |
drcapulet/sentry | refs/heads/master | src/sentry/db/postgres/exceptions.py | 28 | from __future__ import absolute_import
import psycopg2
import traceback
class CompositeTraceback(object):
    """Chains several traceback objects so they can be walked as one.

    Iterating yields the *successor* of each frame while remembering the
    frame itself, so the ``tb_*`` accessors always describe the frame
    most recently visited.
    """
    def __init__(self, tb_list):
        assert isinstance(tb_list, (list, tuple))
        self.__tb_list = tb_list
        self.__iterator = iter(self)

    def __iter__(self):
        # Debug ``print repr(tb)`` statements removed: they polluted
        # stdout and were Python 2-only syntax.
        for tb in self.__tb_list:
            while tb:
                self.__curframe = tb
                tb = tb.tb_next
                yield tb

    def tb_frame(self):
        return self.__curframe.tb_frame

    def tb_lasti(self):
        return self.__curframe.tb_lasti

    def tb_lineno(self):
        return self.__curframe.tb_lineno

    def tb_next(self):
        # The ``next()`` builtin works on Python 2.6+ and 3.x; the old
        # ``self.__iterator.next()`` call broke on Python 3.
        next(self.__iterator)
        return self
class TransactionAborted(psycopg2.DatabaseError):
    """Database error carrying two ``sys.exc_info()`` tuples: the original
    failure (``exc_info``) and the exception raised by the current,
    rejected statement (``cur_exc_info``).
    """
    def __init__(self, exc_info, cur_exc_info):
        self.exc_info = exc_info
        self.cur_exc_info = cur_exc_info
    def __repr__(self):
        # Render a full formatted traceback of the combined exception.
        return '\n'.join(traceback.format_exception(self.__class__, self, self.get_traceback()))
    def __str__(self):
        # NOTE: Python 2 idiom; ``unicode`` does not exist on Python 3.
        return str(unicode(self))
    def __unicode__(self):
        return u'(%s) %s' % (self.cur_exc_info[0].__name__, self.cur_exc_info[1])
    def get_traceback(self):
        # Combine both tracebacks into one walkable object.
        return CompositeTraceback([self.exc_info[2], self.cur_exc_info[2]])
|
mcking49/apache-flask | refs/heads/master | Python/Lib/site-packages/jinja2/testsuite/__init__.py | 404 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite
~~~~~~~~~~~~~~~~
All the unittests of Jinja2. These tests can be executed by
either running run-tests.py using multiple Python versions at
the same time.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import re
import sys
import unittest
from traceback import format_exception
from jinja2 import loaders
from jinja2._compat import PY2
here = os.path.dirname(os.path.abspath(__file__))

# Loader fixtures shared across the loader tests: one instance of every
# loader type jinja2 ships, each able to resolve a simple template.
dict_loader = loaders.DictLoader({
    'justdict.html': 'FOO'
})
package_loader = loaders.PackageLoader('jinja2.testsuite.res', 'templates')
filesystem_loader = loaders.FileSystemLoader(here + '/res/templates')
function_loader = loaders.FunctionLoader({'justfunction.html': 'FOO'}.get)
choice_loader = loaders.ChoiceLoader([dict_loader, package_loader])
prefix_loader = loaders.PrefixLoader({
    'a': filesystem_loader,
    'b': dict_loader
})
class JinjaTestCase(unittest.TestCase):
    """Base test case exposing snake_case assertion helpers.

    Tests should use only these helpers; wrap any further standard
    unittest methods you need.
    """

    def setup(self):
        """Subclass hook, invoked from :meth:`setUp`."""
        pass

    def teardown(self):
        """Subclass hook, invoked from :meth:`tearDown`."""
        pass

    def setUp(self):
        self.setup()

    def tearDown(self):
        self.teardown()

    def assert_equal(self, a, b):
        return self.assertEqual(a, b)

    def assert_raises(self, *args, **kwargs):
        return self.assertRaises(*args, **kwargs)

    def assert_traceback_matches(self, callback, expected_tb):
        """Run *callback*, expecting a raise whose traceback matches."""
        raised = False
        try:
            callback()
        except Exception:
            raised = True
            rendered = ''.join(format_exception(*sys.exc_info()))
            if re.search(expected_tb.strip(), rendered) is None:
                raise self.fail('Traceback did not match:\n\n%s\nexpected:\n%s'
                                % (rendered, expected_tb))
        if not raised:
            self.fail('Expected exception')
def find_all_tests(suite):
    """Yields all the tests and their names from a given suite."""
    pending = [suite]
    while pending:
        candidate = pending.pop()
        try:
            # Suites are iterable containers of further suites/tests.
            pending.extend(candidate)
        except TypeError:
            # Not iterable -> an actual test case; derive its dotted name.
            cls = candidate.__class__
            dotted = '%s.%s.%s' % (cls.__module__, cls.__name__,
                                   candidate._testMethodName)
            yield candidate, dotted
class BetterLoader(unittest.TestLoader):
    """A nicer loader that solves two problems.  First of all we are setting
    up tests from different sources and we're doing this programmatically
    which breaks the default loading logic so this is required anyways.
    Secondly this loader has a nicer interpolation for test names than the
    default one so you can just do ``run-tests.py ViewTestCase`` and it
    will work.
    """

    def getRootSuite(self):
        # The full suite assembled by the module-level :func:`suite`.
        return suite()

    def loadTestsFromName(self, name, module=None):
        root = self.getRootSuite()
        if name == 'suite':
            return root
        all_tests = []
        for testcase, testname in find_all_tests(root):
            # Accept an exact dotted name as well as any suffix, infix or
            # prefix match, so bare class/method names find their tests.
            if testname == name or \
               testname.endswith('.' + name) or \
               ('.' + name + '.') in testname or \
               testname.startswith(name + '.'):
                all_tests.append(testcase)
        if not all_tests:
            raise LookupError('could not find test case for "%s"' % name)
        if len(all_tests) == 1:
            return all_tests[0]
        rv = unittest.TestSuite()
        for test in all_tests:
            rv.addTest(test)
        return rv
def suite():
    """Assembles and returns the complete Jinja2 test suite."""
    # Imported here (not at module level) so importing this package does
    # not pull in every test module.
    from jinja2.testsuite import ext, filters, tests, core_tags, \
        loader, inheritance, imports, lexnparse, security, api, \
        regression, debug, utils, bytecode_cache, doctests
    suite = unittest.TestSuite()
    suite.addTest(ext.suite())
    suite.addTest(filters.suite())
    suite.addTest(tests.suite())
    suite.addTest(core_tags.suite())
    suite.addTest(loader.suite())
    suite.addTest(inheritance.suite())
    suite.addTest(imports.suite())
    suite.addTest(lexnparse.suite())
    suite.addTest(security.suite())
    suite.addTest(api.suite())
    suite.addTest(regression.suite())
    suite.addTest(debug.suite())
    suite.addTest(utils.suite())
    suite.addTest(bytecode_cache.suite())
    # doctests will not run on python 3 currently.  Too many issues
    # with that, do not test that on that platform.
    if PY2:
        suite.addTest(doctests.suite())
    return suite
def main():
    """Runs the testsuite as command line application."""
    try:
        unittest.main(testLoader=BetterLoader(), defaultTest='suite')
    except Exception as e:
        # Show a short message instead of a full traceback to CLI users.
        print('Error: %s' % e)
|
wxthon/googletest | refs/heads/master | test/gtest_help_test.py | 2968 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import gtest_test_utils
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'

PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
# Mis-spelled flag forms (single dash, slash, dashes for underscores)
# that should still trigger the help output.
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
                           re.sub('^--', '/', LIST_TESTS_FLAG),
                           re.sub('_', '-', LIST_TESTS_FLAG)]
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'

# Probe the test binary to see whether death tests were compiled in.
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
    [PROGRAM_PATH, LIST_TESTS_FLAG]).output

# The help message must match this regex.
HELP_REGEX = re.compile(
    FLAG_PREFIX + r'list_tests.*' +
    FLAG_PREFIX + r'filter=.*' +
    FLAG_PREFIX + r'also_run_disabled_tests.*' +
    FLAG_PREFIX + r'repeat=.*' +
    FLAG_PREFIX + r'shuffle.*' +
    FLAG_PREFIX + r'random_seed=.*' +
    FLAG_PREFIX + r'color=.*' +
    FLAG_PREFIX + r'print_time.*' +
    FLAG_PREFIX + r'output=.*' +
    FLAG_PREFIX + r'break_on_failure.*' +
    FLAG_PREFIX + r'throw_on_failure.*' +
    FLAG_PREFIX + r'catch_exceptions=0.*',
    re.DOTALL)
def RunWithFlag(flag):
  """Runs gtest_help_test_ with the given flag.

  Returns:
    the exit code and the text output as a tuple.
  Args:
    flag:  the command-line flag to pass to gtest_help_test_, or None.
  """

  command = [PROGRAM_PATH] if flag is None else [PROGRAM_PATH, flag]
  child = gtest_test_utils.Subprocess(command)
  return child.exit_code, child.output
class GTestHelpTest(gtest_test_utils.TestCase):
  """Tests the --help flag and its equivalent forms."""

  def TestHelpFlag(self, flag):
    """Verifies correct behavior when help flag is specified.

    The right message must be printed and the tests must
    skipped when the given flag is specified.

    Args:
      flag:  A flag to pass to the binary or None.
    """
    exit_code, output = RunWithFlag(flag)
    self.assertEquals(0, exit_code)
    self.assert_(HELP_REGEX.search(output), output)
    # Streaming results is only supported (and advertised) on Linux.
    if IS_LINUX:
      self.assert_(STREAM_RESULT_TO_FLAG in output, output)
    else:
      self.assert_(STREAM_RESULT_TO_FLAG not in output, output)
    # Death-test flags only appear when the binary was built with them.
    if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
      self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
    else:
      self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)

  def TestNonHelpFlag(self, flag):
    """Verifies correct behavior when no help flag is specified.

    Verifies that when no help flag is specified, the tests are run
    and the help message is not printed.

    Args:
      flag:  A flag to pass to the binary or None.
    """
    exit_code, output = RunWithFlag(flag)
    # The test binary is expected to fail (non-zero) when actually run.
    self.assert_(exit_code != 0)
    self.assert_(not HELP_REGEX.search(output), output)

  def testPrintsHelpWithFullFlag(self):
    self.TestHelpFlag('--help')

  def testPrintsHelpWithShortFlag(self):
    self.TestHelpFlag('-h')

  def testPrintsHelpWithQuestionFlag(self):
    self.TestHelpFlag('-?')

  def testPrintsHelpWithWindowsStyleQuestionFlag(self):
    self.TestHelpFlag('/?')

  def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
    self.TestHelpFlag(UNKNOWN_FLAG)

  def testPrintsHelpWithIncorrectFlagStyle(self):
    for incorrect_flag in INCORRECT_FLAG_VARIANTS:
      self.TestHelpFlag(incorrect_flag)

  def testRunsTestsWithoutHelpFlag(self):
    """Verifies that when no help flag is specified, the tests are run
    and the help message is not printed."""
    self.TestNonHelpFlag(None)

  def testRunsTestsWithGtestInternalFlag(self):
    """Verifies that the tests are run and no help message is printed when
    a flag starting with Google Test prefix and 'internal_' is supplied."""
    self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
if __name__ == '__main__':
gtest_test_utils.Main()
|
michellab/SireUnitTests | refs/heads/devel | unittests/SireIO/test_amber2_write.py | 1 |
from Sire.IO import *
from Sire.Mol import *
from Sire.MM import *
from Sire.FF import *
from nose.tools import assert_equal, assert_almost_equal
def _getEnergies(s):
    """Build intra/inter forcefields over all molecules of system *s*
    and return the combined table of energy components."""
    # Intramolecular coulomb/LJ terms.
    intraclj = IntraFF("intraclj")
    intraclj.add(s.molecules())
    # Bonded (internal) terms, including 1-4 nonbonded scaling.
    intraff = InternalFF("intraff")
    intraff.setUse14Calculation(True)
    intraff.add(s.molecules())
    # Intermolecular terms.
    interff = InterFF("interff")
    interff.add(s.molecules())
    ffs = ForceFields()
    ffs.add(intraclj)
    ffs.add(intraff)
    ffs.add(interff)
    return ffs.energies()
def _printCompareEnergies(oldnrgs, newnrgs):
keys = list(oldnrgs.keys())
keys.sort()
for key in keys:
print("%s: %s %s" % (key, oldnrgs[key],newnrgs[key]))
def _assert_almost_equal(oldnrgs, newnrgs):
    """Assert both energy tables have identical components and values
    that agree to 5 decimal places."""
    components = sorted(oldnrgs.keys())
    assert_equal(components, sorted(newnrgs.keys()))
    for component in components:
        assert_almost_equal(oldnrgs[component], newnrgs[component], 5)
def _test_write(files, verbose=False):
    """Round-trip test: read *files*, write them back out, re-read the
    result and assert the energies of both systems agree."""
    try:
        s = MoleculeParser.read
    except (NameError, AttributeError):
        # Narrowed from a bare ``except:``: this guard exists only for
        # older Sire versions where MoleculeParser is missing entirely.
        return
    if verbose:
        print("Reading... %s | %s" % (files[0], files[1]))
    s = MoleculeParser.read(files)
    # calculate the initial internal energy
    oldnrgs = _getEnergies(s)
    if verbose:
        print("Writing...")
    filenames = MoleculeParser.write(s, "test")
    if verbose:
        print("Saved the system to file(s): %s" % filenames)
    # read this back in and check the energies
    s = MoleculeParser.read(filenames)
    newnrgs = _getEnergies(s)
    if verbose:
        _printCompareEnergies(oldnrgs, newnrgs)
    _assert_almost_equal(oldnrgs, newnrgs)
def test_write(verbose=False):
    """Run the round-trip write test on several example systems."""
    _test_write( ["../io/ose.top", "../io/ose.crd"], verbose )
    _test_write( ["../io/thrombin.top", "../io/thrombin.rst7"], verbose )
    _test_write( ["../io/proteinbox.top", "../io/proteinbox.crd"], verbose )
_test_write( ["../io/proteinbox.top", "../io/proteinbox.crd"], verbose )
if __name__ == "__main__":
test_write(True)
|
erindieringer/erindieringer-byte1 | refs/heads/master | lib/werkzeug/test.py | 308 | # -*- coding: utf-8 -*-
"""
werkzeug.test
~~~~~~~~~~~~~
This module implements a client to WSGI applications for testing.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
import mimetypes
from time import time
from random import random
from itertools import chain
from tempfile import TemporaryFile
from io import BytesIO
try:
from urllib2 import Request as U2Request
except ImportError:
from urllib.request import Request as U2Request
try:
from http.cookiejar import CookieJar
except ImportError: # Py2
from cookielib import CookieJar
from werkzeug._compat import iterlists, iteritems, itervalues, to_native, \
string_types, text_type, reraise, wsgi_encoding_dance, \
make_literal_wrapper
from werkzeug._internal import _empty_stream, _get_environ
from werkzeug.wrappers import BaseRequest
from werkzeug.urls import url_encode, url_fix, iri_to_uri, url_unquote, \
url_unparse, url_parse
from werkzeug.wsgi import get_host, get_current_url, ClosingIterator
from werkzeug.utils import dump_cookie
from werkzeug.datastructures import FileMultiDict, MultiDict, \
CombinedMultiDict, Headers, FileStorage
def stream_encode_multipart(values, use_tempfile=True, threshold=1024 * 500,
                            boundary=None, charset='utf-8'):
    """Encode a dict of values (either strings or file descriptors or
    :class:`FileStorage` objects.) into a multipart encoded string stored
    in a file descriptor.
    """
    if boundary is None:
        boundary = '---------------WerkzeugFormPart_%s%s' % (time(), random())
    # Mutable closure state: [stream, total_length, on_disk].  A list is
    # used because Python 2 has no ``nonlocal``.
    _closure = [BytesIO(), 0, False]
    if use_tempfile:
        def write_binary(string):
            stream, total_length, on_disk = _closure
            if on_disk:
                stream.write(string)
            else:
                length = len(string)
                # Spill to a real temporary file once the in-memory
                # buffer would exceed ``threshold`` bytes.
                if length + _closure[1] <= threshold:
                    stream.write(string)
                else:
                    new_stream = TemporaryFile('wb+')
                    new_stream.write(stream.getvalue())
                    new_stream.write(string)
                    _closure[0] = new_stream
                    _closure[2] = True
                _closure[1] = total_length + length
    else:
        write_binary = _closure[0].write
    def write(string):
        # Text helper: encode with the form charset, then write raw bytes.
        write_binary(string.encode(charset))
    if not isinstance(values, MultiDict):
        values = MultiDict(values)
    for key, values in iterlists(values):
        for value in values:
            write('--%s\r\nContent-Disposition: form-data; name="%s"' %
                  (boundary, key))
            reader = getattr(value, 'read', None)
            if reader is not None:
                # File-like value: emit filename/content-type headers and
                # stream the payload in 16 KiB chunks.
                filename = getattr(value, 'filename',
                                   getattr(value, 'name', None))
                content_type = getattr(value, 'content_type', None)
                if content_type is None:
                    content_type = filename and \
                        mimetypes.guess_type(filename)[0] or \
                        'application/octet-stream'
                if filename is not None:
                    write('; filename="%s"\r\n' % filename)
                else:
                    write('\r\n')
                write('Content-Type: %s\r\n\r\n' % content_type)
                while 1:
                    chunk = reader(16384)
                    if not chunk:
                        break
                    write_binary(chunk)
            else:
                # Plain value: coerce to a native string form field.
                if isinstance(value, string_types):
                    value = to_native(value, charset)
                else:
                    value = str(value)
                write('\r\n\r\n' + value)
            write('\r\n')
    write('--%s--\r\n' % boundary)
    length = int(_closure[0].tell())
    # Rewind so callers can read the encoded body from the start.
    _closure[0].seek(0)
    return _closure[0], length, boundary
def encode_multipart(values, boundary=None, charset='utf-8'):
    """Like `stream_encode_multipart` but returns a tuple in the form
    (``boundary``, ``data``) where data is a bytestring.
    """
    # use_tempfile=False keeps everything in memory, so .read() is cheap.
    stream, length, boundary = stream_encode_multipart(
        values, use_tempfile=False, boundary=boundary, charset=charset)
    return boundary, stream.read()
def File(fd, filename=None, mimetype=None):
    """Backwards compat."""
    # Deprecated shim kept so old imports keep working; emits a warning
    # and delegates to FileStorage.
    from warnings import warn
    warn(DeprecationWarning('werkzeug.test.File is deprecated, use the '
                            'EnvironBuilder or FileStorage instead'))
    return FileStorage(fd, filename=filename, content_type=mimetype)
class _TestCookieHeaders(object):
"""A headers adapter for cookielib
"""
def __init__(self, headers):
self.headers = headers
def getheaders(self, name):
headers = []
name = name.lower()
for k, v in self.headers:
if k.lower() == name:
headers.append(v)
return headers
def get_all(self, name, default=None):
rv = []
for k, v in self.headers:
if k.lower() == name.lower():
rv.append(v)
return rv or default or []
class _TestCookieResponse(object):
    """Something that looks like a httplib.HTTPResponse, but is actually just an
    adapter for our test responses to make them available for cookielib.
    """

    def __init__(self, headers):
        self.headers = _TestCookieHeaders(headers)

    def info(self):
        # cookielib calls ``info()`` to reach the response headers.
        return self.headers
class _TestCookieJar(CookieJar):
    """A cookielib.CookieJar modified to inject and read cookie headers from
    and to wsgi environments, and wsgi application responses.
    """

    def inject_wsgi(self, environ):
        """Inject the cookies as client headers into the server's wsgi
        environment.
        """
        cvals = []
        for cookie in self:
            cvals.append('%s=%s' % (cookie.name, cookie.value))
        # Only set the header when there is at least one cookie to send.
        if cvals:
            environ['HTTP_COOKIE'] = '; '.join(cvals)

    def extract_wsgi(self, environ, headers):
        """Extract the server's set-cookie headers as cookies into the
        cookie jar.
        """
        # Wrap the raw header list and the request URL in the shapes
        # that cookielib's extract_cookies() expects.
        self.extract_cookies(
            _TestCookieResponse(headers),
            U2Request(get_current_url(environ)),
        )
def _iter_data(data):
    """Iterates over a dict or multidict yielding all keys and values.

    This is used to iterate over the data passed to the
    :class:`EnvironBuilder`.
    """
    if isinstance(data, MultiDict):
        for key, value_list in iterlists(data):
            for item in value_list:
                yield key, item
        return
    for key, value_or_list in iteritems(data):
        if not isinstance(value_or_list, list):
            yield key, value_or_list
        else:
            for item in value_or_list:
                yield key, item
class EnvironBuilder(object):
"""This class can be used to conveniently create a WSGI environment
for testing purposes. It can be used to quickly create WSGI environments
or request objects from arbitrary data.
The signature of this class is also used in some other places as of
Werkzeug 0.5 (:func:`create_environ`, :meth:`BaseResponse.from_values`,
:meth:`Client.open`). Because of this most of the functionality is
available through the constructor alone.
Files and regular form data can be manipulated independently of each
other with the :attr:`form` and :attr:`files` attributes, but are
passed with the same argument to the constructor: `data`.
`data` can be any of these values:
- a `str`: If it's a string it is converted into a :attr:`input_stream`,
the :attr:`content_length` is set and you have to provide a
:attr:`content_type`.
- a `dict`: If it's a dict the keys have to be strings and the values
any of the following objects:
- a :class:`file`-like object. These are converted into
:class:`FileStorage` objects automatically.
- a tuple. The :meth:`~FileMultiDict.add_file` method is called
with the tuple items as positional arguments.
.. versionadded:: 0.6
`path` and `base_url` can now be unicode strings that are encoded using
the :func:`iri_to_uri` function.
:param path: the path of the request. In the WSGI environment this will
end up as `PATH_INFO`. If the `query_string` is not defined
and there is a question mark in the `path` everything after
it is used as query string.
:param base_url: the base URL is a URL that is used to extract the WSGI
URL scheme, host (server name + server port) and the
script root (`SCRIPT_NAME`).
:param query_string: an optional string or dict with URL parameters.
:param method: the HTTP method to use, defaults to `GET`.
:param input_stream: an optional input stream. Do not specify this and
`data`. As soon as an input stream is set you can't
modify :attr:`args` and :attr:`files` unless you
set the :attr:`input_stream` to `None` again.
:param content_type: The content type for the request. As of 0.5 you
don't have to provide this when specifying files
and form data via `data`.
:param content_length: The content length for the request. You don't
have to specify this when providing data via
`data`.
:param errors_stream: an optional error stream that is used for
`wsgi.errors`. Defaults to :data:`stderr`.
:param multithread: controls `wsgi.multithread`. Defaults to `False`.
:param multiprocess: controls `wsgi.multiprocess`. Defaults to `False`.
:param run_once: controls `wsgi.run_once`. Defaults to `False`.
:param headers: an optional list or :class:`Headers` object of headers.
:param data: a string or dict of form data. See explanation above.
:param environ_base: an optional dict of environment defaults.
:param environ_overrides: an optional dict of environment overrides.
:param charset: the charset used to encode unicode data.
"""
#: the server protocol to use. defaults to HTTP/1.1
server_protocol = 'HTTP/1.1'
#: the wsgi version to use. defaults to (1, 0)
wsgi_version = (1, 0)
#: the default request class for :meth:`get_request`
request_class = BaseRequest
    def __init__(self, path='/', base_url=None, query_string=None,
                 method='GET', input_stream=None, content_type=None,
                 content_length=None, errors_stream=None, multithread=False,
                 multiprocess=False, run_once=False, headers=None, data=None,
                 environ_base=None, environ_overrides=None, charset='utf-8'):
        # wraps the '?' literal as bytes or text to match the type of `path`
        path_s = make_literal_wrapper(path)
        if query_string is None and path_s('?') in path:
            # split an inline query string off the path
            path, query_string = path.split(path_s('?'), 1)
        self.charset = charset
        self.path = iri_to_uri(path)
        if base_url is not None:
            base_url = url_fix(iri_to_uri(base_url, charset), charset)
        # triggers the base_url property setter, which derives url_scheme,
        # host and script_root from the URL (or applies defaults for None)
        self.base_url = base_url
        if isinstance(query_string, (bytes, text_type)):
            # raw string: stored as-is via the query_string property
            self.query_string = query_string
        else:
            # dict-like: normalized to a MultiDict and stored via `args`
            if query_string is None:
                query_string = MultiDict()
            elif not isinstance(query_string, MultiDict):
                query_string = MultiDict(query_string)
            self.args = query_string
        self.method = method
        if headers is None:
            headers = Headers()
        elif not isinstance(headers, Headers):
            headers = Headers(headers)
        self.headers = headers
        if content_type is not None:
            # reflected into the headers by the content_type property
            self.content_type = content_type
        if errors_stream is None:
            errors_stream = sys.stderr
        self.errors_stream = errors_stream
        self.multithread = multithread
        self.multiprocess = multiprocess
        self.run_once = run_once
        self.environ_base = environ_base
        self.environ_overrides = environ_overrides
        # property setter: clears any previously set form/files
        self.input_stream = input_stream
        self.content_length = content_length
        self.closed = False
        if data:
            # `data` and `input_stream` are mutually exclusive ways of
            # providing a request body
            if input_stream is not None:
                raise TypeError('can\'t provide input stream and data')
            if isinstance(data, text_type):
                data = data.encode(self.charset)
            if isinstance(data, bytes):
                # raw body: becomes the input stream directly
                self.input_stream = BytesIO(data)
                if self.content_length is None:
                    self.content_length = len(data)
            else:
                # mapping of form fields and file uploads; tuples, dicts and
                # file-like objects are treated as file entries
                for key, value in _iter_data(data):
                    if isinstance(value, (tuple, dict)) or \
                       hasattr(value, 'read'):
                        self._add_file_from_data(key, value)
                    else:
                        self.form.setlistdefault(key).append(value)
def _add_file_from_data(self, key, value):
"""Called in the EnvironBuilder to add files from the data dict."""
if isinstance(value, tuple):
self.files.add_file(key, *value)
elif isinstance(value, dict):
from warnings import warn
warn(DeprecationWarning('it\'s no longer possible to pass dicts '
'as `data`. Use tuples or FileStorage '
'objects instead'), stacklevel=2)
value = dict(value)
mimetype = value.pop('mimetype', None)
if mimetype is not None:
value['content_type'] = mimetype
self.files.add_file(key, **value)
else:
self.files.add_file(key, value)
    def _get_base_url(self):
        # recompose scheme + host + script root into a normalized base URL
        # that always ends with exactly one trailing slash
        return url_unparse((self.url_scheme, self.host,
                            self.script_root, '', '')).rstrip('/') + '/'
    def _set_base_url(self, value):
        if value is None:
            # defaults used when no base URL is provided
            scheme = 'http'
            netloc = 'localhost'
            script_root = ''
        else:
            scheme, netloc, script_root, qs, anchor = url_parse(value)
            if qs or anchor:
                raise ValueError('base url must not contain a query string '
                                 'or fragment')
        # the base URL is decomposed into three independent attributes
        self.script_root = script_root.rstrip('/')
        self.host = netloc
        self.url_scheme = scheme
    base_url = property(_get_base_url, _set_base_url, doc='''
        The base URL is a URL that is used to extract the WSGI
        URL scheme, host (server name + server port) and the
        script root (`SCRIPT_NAME`).''')
    # remove the accessor functions from the class namespace; only the
    # property itself stays public
    del _get_base_url, _set_base_url
    def _get_content_type(self):
        ct = self.headers.get('Content-Type')
        if ct is None and not self._input_stream:
            # no explicit header and no raw body: guess from the form data
            # for methods that usually carry a request body
            if self.method in ('POST', 'PUT', 'PATCH'):
                if self._files:
                    return 'multipart/form-data'
                return 'application/x-www-form-urlencoded'
            return None
        return ct
    def _set_content_type(self, value):
        # None removes the header entirely instead of storing an empty value
        if value is None:
            self.headers.pop('Content-Type', None)
        else:
            self.headers['Content-Type'] = value
    content_type = property(_get_content_type, _set_content_type, doc='''
        The content type for the request. Reflected from and to the
        :attr:`headers`. Do not set if you set :attr:`files` or
        :attr:`form` for auto detection.''')
    # drop the accessors; only the property remains
    del _get_content_type, _set_content_type
    def _get_content_length(self):
        # header value converted to int; None when header is missing
        return self.headers.get('Content-Length', type=int)
    def _set_content_length(self, value):
        # None removes the header entirely instead of storing an empty value
        if value is None:
            self.headers.pop('Content-Length', None)
        else:
            self.headers['Content-Length'] = str(value)
    content_length = property(_get_content_length, _set_content_length, doc='''
        The content length as integer. Reflected from and to the
        :attr:`headers`. Do not set if you set :attr:`files` or
        :attr:`form` for auto detection.''')
    # drop the accessors; only the property remains
    del _get_content_length, _set_content_length
    def form_property(name, storage, doc):
        # internal factory for the `form` and `files` properties; `storage`
        # is the container class used to lazily create the backing value,
        # which is stored on the instance as `_form` / `_files`
        key = '_' + name
        def getter(self):
            if self._input_stream is not None:
                # a raw input stream and parsed form data are mutually
                # exclusive on the builder
                raise AttributeError('an input stream is defined')
            rv = getattr(self, key)
            if rv is None:
                # create the container on first access
                rv = storage()
                setattr(self, key, rv)
            return rv
        def setter(self, value):
            # assigning form data invalidates any raw input stream
            self._input_stream = None
            setattr(self, key, value)
        return property(getter, setter, doc)
    form = form_property('form', MultiDict, doc='''
        A :class:`MultiDict` of form values.''')
    files = form_property('files', FileMultiDict, doc='''
        A :class:`FileMultiDict` of uploaded files. You can use the
        :meth:`~FileMultiDict.add_file` method to add new files to the
        dict.''')
    # the factory is only needed during class construction
    del form_property
    def _get_input_stream(self):
        return self._input_stream
    def _set_input_stream(self, value):
        self._input_stream = value
        # a raw input stream replaces any previously parsed form data
        self._form = self._files = None
    input_stream = property(_get_input_stream, _set_input_stream, doc='''
        An optional input stream. If you set this it will clear
        :attr:`form` and :attr:`files`.''')
    # drop the accessors; only the property remains
    del _get_input_stream, _set_input_stream
    def _get_query_string(self):
        if self._query_string is None:
            # no raw string stored: serialize the args MultiDict on the fly
            if self._args is not None:
                return url_encode(self._args, charset=self.charset)
            return ''
        return self._query_string
    def _set_query_string(self, value):
        # a raw query string and parsed args are mutually exclusive
        self._query_string = value
        self._args = None
    query_string = property(_get_query_string, _set_query_string, doc='''
        The query string. If you set this to a string :attr:`args` will
        no longer be available.''')
    # drop the accessors; only the property remains
    del _get_query_string, _set_query_string
    def _get_args(self):
        if self._query_string is not None:
            # a raw query string takes precedence and blocks dict access
            raise AttributeError('a query string is defined')
        if self._args is None:
            # create the container on first access
            self._args = MultiDict()
        return self._args
    def _set_args(self, value):
        # assigning args invalidates any raw query string
        self._query_string = None
        self._args = value
    args = property(_get_args, _set_args, doc='''
        The URL arguments as :class:`MultiDict`.''')
    # drop the accessors; only the property remains
    del _get_args, _set_args
@property
def server_name(self):
"""The server name (read-only, use :attr:`host` to set)"""
return self.host.split(':', 1)[0]
@property
def server_port(self):
"""The server port as integer (read-only, use :attr:`host` to set)"""
pieces = self.host.split(':', 1)
if len(pieces) == 2 and pieces[1].isdigit():
return int(pieces[1])
elif self.url_scheme == 'https':
return 443
return 80
    def __del__(self):
        # best-effort cleanup of uploaded files on garbage collection;
        # errors must never escape the finalizer
        try:
            self.close()
        except Exception:
            pass
def close(self):
"""Closes all files. If you put real :class:`file` objects into the
:attr:`files` dict you can call this method to automatically close
them all in one go.
"""
if self.closed:
return
try:
files = itervalues(self.files)
except AttributeError:
files = ()
for f in files:
try:
f.close()
except Exception:
pass
self.closed = True
    def get_environ(self):
        """Return the built environ."""
        input_stream = self.input_stream
        content_length = self.content_length
        content_type = self.content_type
        if input_stream is not None:
            # measure the remaining length of the stream without
            # consuming it: seek to the end and back
            start_pos = input_stream.tell()
            input_stream.seek(0, 2)
            end_pos = input_stream.tell()
            input_stream.seek(start_pos)
            content_length = end_pos - start_pos
        elif content_type == 'multipart/form-data':
            # encode form fields and files into a multipart body and
            # append the generated boundary to the content type
            values = CombinedMultiDict([self.form, self.files])
            input_stream, content_length, boundary = \
                stream_encode_multipart(values, charset=self.charset)
            content_type += '; boundary="%s"' % boundary
        elif content_type == 'application/x-www-form-urlencoded':
            # url_encode returns text; the WSGI input stream must carry
            # bytes, and percent-encoded data is plain ASCII
            values = url_encode(self.form, charset=self.charset)
            values = values.encode('ascii')
            content_length = len(values)
            input_stream = BytesIO(values)
        else:
            # no body at all
            input_stream = _empty_stream
        result = {}
        if self.environ_base:
            result.update(self.environ_base)
        def _path_encode(x):
            # unquote, then re-encode the way WSGI servers hand paths over
            return wsgi_encoding_dance(url_unquote(x, self.charset), self.charset)
        qs = wsgi_encoding_dance(self.query_string)
        result.update({
            'REQUEST_METHOD': self.method,
            'SCRIPT_NAME': _path_encode(self.script_root),
            'PATH_INFO': _path_encode(self.path),
            'QUERY_STRING': qs,
            'SERVER_NAME': self.server_name,
            'SERVER_PORT': str(self.server_port),
            'HTTP_HOST': self.host,
            'SERVER_PROTOCOL': self.server_protocol,
            'CONTENT_TYPE': content_type or '',
            'CONTENT_LENGTH': str(content_length or '0'),
            'wsgi.version': self.wsgi_version,
            'wsgi.url_scheme': self.url_scheme,
            'wsgi.input': input_stream,
            'wsgi.errors': self.errors_stream,
            'wsgi.multithread': self.multithread,
            'wsgi.multiprocess': self.multiprocess,
            'wsgi.run_once': self.run_once
        })
        # headers become HTTP_* keys per the CGI/WSGI convention
        for key, value in self.headers.to_wsgi_list():
            result['HTTP_%s' % key.upper().replace('-', '_')] = value
        # overrides are applied last so they win over everything else
        if self.environ_overrides:
            result.update(self.environ_overrides)
        return result
def get_request(self, cls=None):
"""Returns a request with the data. If the request class is not
specified :attr:`request_class` is used.
:param cls: The request wrapper to use.
"""
if cls is None:
cls = self.request_class
return cls(self.get_environ())
class ClientRedirectError(Exception):
    """Raised by the test :class:`Client` when a redirect loop is
    detected while ``follow_redirects=True``.
    """
class Client(object):
    """This class allows to send requests to a wrapped application.

    The response wrapper can be a class or factory function that takes
    three arguments: app_iter, status and headers.  The default response
    wrapper just returns a tuple.

    Example::

        class ClientResponse(BaseResponse):
            ...

        client = Client(MyApplication(), response_wrapper=ClientResponse)

    The use_cookies parameter indicates whether cookies should be stored and
    sent for subsequent requests.  This is True by default, but passing False
    will disable this behaviour.

    If you want to request some subdomain of your application you may set
    `allow_subdomain_redirects` to `True` as if not no external redirects
    are allowed.

    .. versionadded:: 0.5
       `use_cookies` is new in this version.  Older versions did not provide
       builtin cookie support.
    """
    def __init__(self, application, response_wrapper=None, use_cookies=True,
                 allow_subdomain_redirects=False):
        self.application = application
        self.response_wrapper = response_wrapper
        # the cookie jar persists cookies across requests made through this
        # client; it is disabled entirely when use_cookies is False
        if use_cookies:
            self.cookie_jar = _TestCookieJar()
        else:
            self.cookie_jar = None
        self.allow_subdomain_redirects = allow_subdomain_redirects
    def set_cookie(self, server_name, key, value='', max_age=None,
                   expires=None, path='/', domain=None, secure=None,
                   httponly=False, charset='utf-8'):
        """Sets a cookie in the client's cookie jar.  The server name
        is required and has to match the one that is also passed to
        the open call.
        """
        assert self.cookie_jar is not None, 'cookies disabled'
        header = dump_cookie(key, value, max_age, expires, path, domain,
                             secure, httponly, charset)
        # fake a response for `server_name` that carries the Set-Cookie
        # header so the jar picks the cookie up via its normal machinery
        environ = create_environ(path, base_url='http://' + server_name)
        headers = [('Set-Cookie', header)]
        self.cookie_jar.extract_wsgi(environ, headers)
    def delete_cookie(self, server_name, key, path='/', domain=None):
        """Deletes a cookie in the test client."""
        # a cookie that is already expired deletes the stored one
        self.set_cookie(server_name, key, expires=0, max_age=0,
                        path=path, domain=domain)
    def run_wsgi_app(self, environ, buffered=False):
        """Runs the wrapped WSGI app with the given environment."""
        if self.cookie_jar is not None:
            # attach stored cookies to the outgoing request ...
            self.cookie_jar.inject_wsgi(environ)
        rv = run_wsgi_app(self.application, environ, buffered=buffered)
        if self.cookie_jar is not None:
            # ... and record any Set-Cookie headers from the response
            self.cookie_jar.extract_wsgi(environ, rv[2])
        return rv
    def resolve_redirect(self, response, new_location, environ, buffered=False):
        """Resolves a single redirect and triggers the request again
        directly on this redirect client.
        """
        scheme, netloc, script_root, qs, anchor = url_parse(new_location)
        base_url = url_unparse((scheme, netloc, '', '', '')).rstrip('/') + '/'
        # compare the redirect target's host labels against the current
        # host to decide whether following the redirect is allowed
        cur_server_name = netloc.split(':', 1)[0].split('.')
        real_server_name = get_host(environ).rsplit(':', 1)[0].split('.')
        if self.allow_subdomain_redirects:
            # a subdomain match only requires the trailing labels to agree
            allowed = cur_server_name[-len(real_server_name):] == real_server_name
        else:
            allowed = cur_server_name == real_server_name
        if not allowed:
            raise RuntimeError('%r does not support redirect to '
                               'external targets' % self.__class__)
        # For redirect handling we temporarily disable the response
        # wrapper.  This is not threadsafe but not a real concern
        # since the test client must not be shared anyways.
        old_response_wrapper = self.response_wrapper
        self.response_wrapper = None
        try:
            # as_tuple=True so the caller gets the new environ back too
            return self.open(path=script_root, base_url=base_url,
                             query_string=qs, as_tuple=True,
                             buffered=buffered)
        finally:
            self.response_wrapper = old_response_wrapper
    def open(self, *args, **kwargs):
        """Takes the same arguments as the :class:`EnvironBuilder` class with
        some additions:  You can provide a :class:`EnvironBuilder` or a WSGI
        environment as only argument instead of the :class:`EnvironBuilder`
        arguments and two optional keyword arguments (`as_tuple`, `buffered`)
        that change the type of the return value or the way the application is
        executed.

        .. versionchanged:: 0.5
           If a dict is provided as file in the dict for the `data` parameter
           the content type has to be called `content_type` now instead of
           `mimetype`.  This change was made for consistency with
           :class:`werkzeug.FileWrapper`.

           The `follow_redirects` parameter was added to :func:`open`.

        Additional parameters:

        :param as_tuple: Returns a tuple in the form ``(environ, result)``
        :param buffered: Set this to True to buffer the application run.
                         This will automatically close the application for
                         you as well.
        :param follow_redirects: Set this to True if the `Client` should
                                 follow HTTP redirects.
        """
        as_tuple = kwargs.pop('as_tuple', False)
        buffered = kwargs.pop('buffered', False)
        follow_redirects = kwargs.pop('follow_redirects', False)
        environ = None
        if not kwargs and len(args) == 1:
            # a single positional argument may be a prebuilt builder or a
            # raw WSGI environ dict
            if isinstance(args[0], EnvironBuilder):
                environ = args[0].get_environ()
            elif isinstance(args[0], dict):
                environ = args[0]
        if environ is None:
            # otherwise build a fresh environ from the given arguments
            builder = EnvironBuilder(*args, **kwargs)
            try:
                environ = builder.get_environ()
            finally:
                builder.close()
        response = self.run_wsgi_app(environ, buffered=buffered)
        # handle redirects
        redirect_chain = []
        while 1:
            # the status line starts with the numeric status code
            status_code = int(response[1].split(None, 1)[0])
            if status_code not in (301, 302, 303, 305, 307) \
               or not follow_redirects:
                break
            new_location = response[2]['location']
            new_redirect_entry = (new_location, status_code)
            # revisiting the same (location, status) pair means the app
            # would redirect forever
            if new_redirect_entry in redirect_chain:
                raise ClientRedirectError('loop detected')
            redirect_chain.append(new_redirect_entry)
            environ, response = self.resolve_redirect(response, new_location,
                                                      environ, buffered=buffered)
        if self.response_wrapper is not None:
            response = self.response_wrapper(*response)
        if as_tuple:
            return environ, response
        return response
    def get(self, *args, **kw):
        """Like open but method is enforced to GET."""
        kw['method'] = 'GET'
        return self.open(*args, **kw)
    def patch(self, *args, **kw):
        """Like open but method is enforced to PATCH."""
        kw['method'] = 'PATCH'
        return self.open(*args, **kw)
    def post(self, *args, **kw):
        """Like open but method is enforced to POST."""
        kw['method'] = 'POST'
        return self.open(*args, **kw)
    def head(self, *args, **kw):
        """Like open but method is enforced to HEAD."""
        kw['method'] = 'HEAD'
        return self.open(*args, **kw)
    def put(self, *args, **kw):
        """Like open but method is enforced to PUT."""
        kw['method'] = 'PUT'
        return self.open(*args, **kw)
    def delete(self, *args, **kw):
        """Like open but method is enforced to DELETE."""
        kw['method'] = 'DELETE'
        return self.open(*args, **kw)
    def options(self, *args, **kw):
        """Like open but method is enforced to OPTIONS."""
        kw['method'] = 'OPTIONS'
        return self.open(*args, **kw)
    def trace(self, *args, **kw):
        """Like open but method is enforced to TRACE."""
        kw['method'] = 'TRACE'
        return self.open(*args, **kw)
    def __repr__(self):
        return '<%s %r>' % (
            self.__class__.__name__,
            self.application
        )
def create_environ(*args, **kwargs):
    """Create a new WSGI environ dict based on the values passed.  The first
    parameter should be the path of the request which defaults to '/'.  The
    second one can either be an absolute path (in that case the host is
    localhost:80) or a full path to the request with scheme, netloc port and
    the path to the script.

    This accepts the same arguments as the :class:`EnvironBuilder`
    constructor.

    .. versionchanged:: 0.5
       This function is now a thin wrapper over :class:`EnvironBuilder` which
       was added in 0.5.  The `headers`, `environ_base`, `environ_overrides`
       and `charset` parameters were added.
    """
    env_builder = EnvironBuilder(*args, **kwargs)
    try:
        # build the environ, then always release any uploaded files
        environ = env_builder.get_environ()
    finally:
        env_builder.close()
    return environ
def run_wsgi_app(app, environ, buffered=False):
    """Return a tuple in the form (app_iter, status, headers) of the
    application output.  This works best if you pass it an application that
    returns an iterator all the time.

    Sometimes applications may use the `write()` callable returned
    by the `start_response` function.  This tries to resolve such edge
    cases automatically.  But if you don't get the expected output you
    should set `buffered` to `True` which enforces buffering.

    If passed an invalid WSGI application the behavior of this function is
    undefined.  Never pass non-conforming WSGI applications to this function.

    :param app: the application to execute.
    :param buffered: set to `True` to enforce buffering.
    :return: tuple in the form ``(app_iter, status, headers)``
    """
    environ = _get_environ(environ)
    # filled by start_response with [status, headers]
    response = []
    # collects data pushed through the legacy write() callable
    buffer = []
    def start_response(status, headers, exc_info=None):
        if exc_info is not None:
            # WSGI requires re-raising when exc_info is supplied
            reraise(*exc_info)
        response[:] = [status, headers]
        # the return value is the WSGI write() callable; everything
        # written through it lands in `buffer`
        return buffer.append
    app_iter = app(environ, start_response)
    # when buffering we emit the close call early and convert the
    # application iterator into a regular list
    if buffered:
        close_func = getattr(app_iter, 'close', None)
        try:
            app_iter = list(app_iter)
        finally:
            if close_func is not None:
                close_func()
    # otherwise we iterate the application iter until we have
    # a response, chain the already received data with the already
    # collected data and wrap it in a new `ClosingIterator` if
    # we have a close callable.
    else:
        while not response:
            buffer.append(next(app_iter))
        if buffer:
            close_func = getattr(app_iter, 'close', None)
            app_iter = chain(buffer, app_iter)
            if close_func is not None:
                app_iter = ClosingIterator(app_iter, close_func)
    return app_iter, response[0], Headers(response[1])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.