repo_name (stringlengths 5–100) | ref (stringlengths 12–67) | path (stringlengths 4–244) | copies (stringlengths 1–8) | content (stringlengths 0–1.05M, nullable)
|---|---|---|---|---|
testbed/testbed
|
refs/heads/master
|
testbed/libexec/test/test.py
|
2
|
# (c) 2015 Mark Hamilton, <mark.lee.hamilton@gmail.com>
#
# This file is part of testbed
#
# Testbed is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Testbed is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Testbed. If not, see <http://www.gnu.org/licenses/>.
"""
Test test functionality.
"""
import argparse
from django.test import TestCase
from testdb.models import Testsuite
from testdb.models import Test
from . import commands
from testbed.libexec import testplan
class TestTestCase(TestCase):
""" Tests for Django backend.
These test must derive from django.test.TestCase. """
@staticmethod
def parser_create():
""" Create sub parser. """
arg_parser = argparse.ArgumentParser(prog="tbd")
subparser = arg_parser.add_subparsers(title="subcommands")
commands.add_subparser(subparser)
return arg_parser
def test_commands_add(self):
""" Add a test. """
parser = TestTestCase.parser_create()
testplan.api.get_or_create(testplan.api.CONTEXT, "testsuite1",
testplan.api.ORDER_NEXT)
args = parser.parse_args("test add build1 testsuite1 test1".split())
args.func(args)
args = parser.parse_args("test add build1 testsuite1 test2".split())
args.func(args)
names = [item.name.name for item in Testsuite.contains("default",
"testsuite1")]
self.assertTrue(len(names) == 1)
names = [item.name.name for item in Test.filter("testsuite1")]
self.assertTrue(len(names) == 2)
self.assertTrue(any("test1" in name for name in names))
self.assertTrue(any("test2" in name for name in names))
def test_context_add(self):
""" Add a testsuite ignoring context.
A testplan called testplan1 is created. Adding a testsuite which is
not part of the default testplan should still work.
"""
testplan.api.get_or_create("testplan.testplan1", "testsuite1",
testplan.api.ORDER_NEXT)
parser = TestTestCase.parser_create()
cmd = "test add build1 testsuite1 test1 --context testschedule1"
args = parser.parse_args(cmd.split())
args.func(args)
tests = Test.filter("testsuite1")
self.assertTrue(len(tests) == 1)
names = [item.name.name for item in tests]
self.assertTrue("test1" in names)
names = [item.testsuite.name.name for item in tests]
self.assertTrue("testsuite1" in names)
tests = Test.filter("testschedule1")
self.assertTrue(len(tests) == 1)
context = [item.testsuite.context.name for item in tests]
self.assertTrue("testschedule1" in context)
names = [item.name.name for item in tests]
self.assertTrue("test1" in names)
names = [item.testsuite.name.name for item in tests]
self.assertTrue("testsuite1" in names)
def test_list_filter(self):
""" Add a test to testsuite. """
parser = TestTestCase.parser_create()
testplan.api.get_or_create(testplan.api.CONTEXT, "testsuite2",
testplan.api.ORDER_NEXT)
testplan.api.get_or_create(testplan.api.CONTEXT, "testsuite3",
testplan.api.ORDER_NEXT)
cmd = "test add build1 testsuite2 test1"
args = parser.parse_args(cmd.split())
args.func(args)
cmd = "test add build1 testsuite3 test2"
args = parser.parse_args(cmd.split())
args.func(args)
cmd = "test add build1 testsuite3 test3"
args = parser.parse_args(cmd.split())
args.func(args)
tests = Test.filter("testsuite3")
items = [item for item in tests]
self.assertTrue(len(items) == 2)
names = [item.name.name for item in items]
self.assertTrue(any("test2" in name for name in names))
self.assertTrue(any("test3" in name for name in names))
|
LukeMurphey/splunk-file-info
|
refs/heads/master
|
tests/HTMLTestRunner.py
|
1
|
# -*- coding: utf-8 -*-
"""
A TestRunner for use with the Python unit testing framework. It
generates a HTML report to show the result at a glance.
The simplest way to use this is to invoke its main method. E.g.
import unittest
import HTMLTestRunner
... define your tests ...
if __name__ == '__main__':
HTMLTestRunner.main()
For more customization options, instantiate an HTMLTestRunner object.
HTMLTestRunner is a counterpart to unittest's TextTestRunner. E.g.
# output to a file
fp = open('my_report.html', 'wb')
runner = HTMLTestRunner.HTMLTestRunner(
stream=fp,
title='My unit test',
description='This demonstrates the report output by HTMLTestRunner.'
)
# Use an external stylesheet.
# See the Template_mixin class for more customizable options
runner.STYLESHEET_TMPL = '<link rel="stylesheet" href="my_stylesheet.css" type="text/css">'
# run the test
runner.run(my_test_suite)
------------------------------------------------------------------------
Copyright (c) 2004-2007, Wai Yip Tung
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name Wai Yip Tung nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# URL: http://tungwaiyip.info/software/HTMLTestRunner.html
__author__ = "Wai Yip Tung"
__version__ = "0.8.3"
"""
Change History
Version 0.8.3
* Prevent crash on class or module-level exceptions (Darren Wurf).
Version 0.8.2
* Show output inline instead of popup window (Viorel Lupu).
Version 0.8.1
* Validated XHTML (Wolfgang Borgert).
* Added description of test classes and test cases.
Version 0.8.0
* Define Template_mixin class for customization.
* Work around an IE 6 bug where it does not treat the <script> block as CDATA.
Version 0.7.1
* Back port to Python 2.3 (Frank Horowitz).
* Fix missing scroll bars in detail log (Podi).
"""
# TODO: color stderr
# TODO: simplify javascript using more than 1 class in the class attribute?
import datetime
import sys
import time
import unittest
import six
import io
from xml.sax import saxutils
# ------------------------------------------------------------------------
# The redirectors below are used to capture output during testing. Output
# sent to sys.stdout and sys.stderr is automatically captured. However,
# in some cases sys.stdout is already cached before HTMLTestRunner is
# invoked (e.g. by calling logging.basicConfig). In order to capture that
# output, use the redirectors for the cached stream.
#
# e.g.
# >>> logging.basicConfig(stream=HTMLTestRunner.stdout_redirector)
# >>>
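#
# A minimal sketch (hedged; assumes logging was configured before the run)
# of re-pointing a cached logging stream at the redirector so log records
# are captured into the report as well:
#
#   import logging
#   logging.basicConfig(stream=stdout_redirector)
#   logging.getLogger('myapp').info('captured alongside stdout')  # assumed logger name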
def to_unicode(s):
try:
return six.text_type(s)
except UnicodeDecodeError:
# s is a non-ASCII byte string
return s.decode('unicode_escape')
class OutputRedirector(object):
"""
Wrapper to redirect stdout or stderr.
"""
def __init__(self, fp):
self.fp = fp
def write(self, s):
self.fp.write(s)
def writelines(self, lines):
# lines = map(to_unicode, lines)
self.fp.writelines(lines)
def flush(self):
self.fp.flush()
stdout_redirector = OutputRedirector(sys.stdout)
stderr_redirector = OutputRedirector(sys.stderr)
# ----------------------------------------------------------------------
# Template
class Template_mixin(object):
"""
Define an HTML template for report customization and generation.
Overall structure of an HTML report
HTML
+------------------------+
|<html> |
| <head> |
| |
| STYLESHEET |
| +----------------+ |
| | | |
| +----------------+ |
| |
| </head> |
| |
| <body> |
| |
| HEADING |
| +----------------+ |
| | | |
| +----------------+ |
| |
| REPORT |
| +----------------+ |
| | | |
| +----------------+ |
| |
| ENDING |
| +----------------+ |
| | | |
| +----------------+ |
| |
| </body> |
|</html> |
+------------------------+
"""
STATUS = {
0: 'pass',
1: 'fail',
2: 'error',
}
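# (These integer codes match the result codes recorded per test in
# _TestResult.result below: 0 for success, 1 for failure, 2 for error.)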
DEFAULT_TITLE = 'Unit Test Report'
DEFAULT_DESCRIPTION = ''
# ------------------------------------------------------------------------
# HTML Template
HTML_TMPL = r"""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>%(title)s</title>
<meta name="generator" content="%(generator)s"/>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
%(stylesheet)s
</head>
<body>
<script language="javascript" type="text/javascript"><!--
output_list = Array();
/* level - 0:Summary; 1:Failed; 2:All */
function showCase(level) {
trs = document.getElementsByTagName("tr");
for (var i = 0; i < trs.length; i++) {
tr = trs[i];
id = tr.id;
if (id.substr(0,2) == 'ft') {
if (level < 1) {
tr.className = 'hiddenRow';
}
else {
tr.className = '';
}
}
if (id.substr(0,2) == 'pt') {
if (level > 1) {
tr.className = '';
}
else {
tr.className = 'hiddenRow';
}
}
}
}
function showClassDetail(cid, count) {
var id_list = Array(count);
var toHide = 1;
for (var i = 0; i < count; i++) {
tid0 = 't' + cid.substr(1) + '.' + (i+1);
tid = 'f' + tid0;
tr = document.getElementById(tid);
if (!tr) {
tid = 'p' + tid0;
tr = document.getElementById(tid);
}
id_list[i] = tid;
if (tr.className) {
toHide = 0;
}
}
for (var i = 0; i < count; i++) {
tid = id_list[i];
if (toHide) {
if(document.getElementById('div_'+tid)){
document.getElementById('div_'+tid).style.display = 'none';
}
document.getElementById(tid).className = 'hiddenRow';
}
else {
document.getElementById(tid).className = '';
}
}
}
function showTestDetail(div_id){
var details_div = document.getElementById(div_id)
var displayState = details_div.style.display
// alert(displayState)
if (displayState != 'block' ) {
displayState = 'block'
details_div.style.display = 'block'
}
else {
details_div.style.display = 'none'
}
}
function html_escape(s) {
s = s.replace(/&/g,'&amp;');
s = s.replace(/</g,'&lt;');
s = s.replace(/>/g,'&gt;');
return s;
}
/* obsoleted by detail in <div>
function showOutput(id, name) {
var w = window.open("", //url
name,
"resizable,scrollbars,status,width=800,height=450");
d = w.document;
d.write("<pre>");
d.write(html_escape(output_list[id]));
d.write("\n");
d.write("<a href='javascript:window.close()'>close</a>\n");
d.write("</pre>\n");
d.close();
}
*/
--></script>
%(heading)s
%(report)s
%(ending)s
</body>
</html>
"""
# variables: (title, generator, stylesheet, heading, report, ending)
# ------------------------------------------------------------------------
# Stylesheet
#
# alternatively use a <link> for external style sheet, e.g.
# <link rel="stylesheet" href="$url" type="text/css">
STYLESHEET_TMPL = """
<style type="text/css" media="screen">
body { font-family: verdana, arial, helvetica, sans-serif; font-size: 80%; }
table { font-size: 100%; }
pre { }
/* -- heading ---------------------------------------------------------------------- */
h1 {
font-size: 16pt;
color: gray;
}
.heading {
margin-top: 0ex;
margin-bottom: 1ex;
}
.heading .attribute {
margin-top: 1ex;
margin-bottom: 0;
}
.heading .description {
margin-top: 4ex;
margin-bottom: 6ex;
}
/* -- css div popup ------------------------------------------------------------------------ */
a.popup_link {
}
a.popup_link:hover {
color: red;
}
.popup_window {
display: none;
position: relative;
left: 0px;
top: 0px;
padding: 10px;
background-color: #EEE;
font-family: "Lucida Console", "Courier New", Courier, monospace;
text-align: left;
font-size: 8pt;
}
/* -- report ------------------------------------------------------------------------ */
#show_detail_line {
margin-top: 3ex;
margin-bottom: 1ex;
}
#result_table {
width: 80%;
border-collapse: collapse;
border: 1px solid #777;
}
#header_row {
font-weight: bold;
color: white;
background-color: #777;
}
#result_table td {
border: 1px solid rgba(119, 119, 119, 0.23);
padding: 2px;
vertical-align:top;
}
#total_row { font-weight: bold; }
.passClass { background-color: #00c853; color: white;}
.failClass { background-color: #fa842d; color: white;}
.errorClass { background-color: #fa2d2d; color: white;}
.passCase { color: #00c853; }
.failCase { color: #fa842d; background-color: #f9ede4; }
.errorCase { color: #fa2d2d; background-color: #ffefef }
.hiddenRow { display: none; }
.testcase { margin-left: 2em; }
/* -- ending ---------------------------------------------------------------------- */
#ending {
}
</style>
"""
# ------------------------------------------------------------------------
# Heading
#
HEADING_TMPL = """<div class='heading'>
<h1>%(title)s</h1>
%(parameters)s
<p class='description'>%(description)s</p>
</div>
""" # variables: (title, parameters, description)
HEADING_ATTRIBUTE_TMPL = """<p class='attribute'><strong>%(name)s:</strong> %(value)s</p>
""" # variables: (name, value)
# ------------------------------------------------------------------------
# Report
#
REPORT_TMPL = """
<p id='show_detail_line'>Show
<a href='javascript:showCase(0)'>Summary</a>
<a href='javascript:showCase(1)'>Failed</a>
<a href='javascript:showCase(2)'>All</a>
</p>
<table id='result_table'>
<colgroup>
<col align='left' />
<col align='right' />
<col align='right' />
<col align='right' />
<col align='right' />
<col align='right' />
</colgroup>
<tr id='header_row'>
<td>Test Group/Test case</td>
<td>Count</td>
<td>Pass</td>
<td>Fail</td>
<td>Error</td>
<td>View</td>
</tr>
%(test_list)s
<tr id='total_row'>
<td>Total</td>
<td>%(count)s</td>
<td>%(Pass)s</td>
<td>%(fail)s</td>
<td>%(error)s</td>
<td>&nbsp;</td>
</tr>
</table>
""" # variables: (test_list, count, Pass, fail, error)
REPORT_CLASS_TMPL = r"""
<tr class='%(style)s'>
<td>%(desc)s</td>
<td>%(count)s</td>
<td>%(Pass)s</td>
<td>%(fail)s</td>
<td>%(error)s</td>
<td><a href="javascript:showClassDetail('%(cid)s',%(count)s)">Detail</a></td>
</tr>
""" # variables: (style, desc, count, Pass, fail, error, cid)
REPORT_TEST_WITH_OUTPUT_TMPL = r"""
<tr id='%(tid)s' class='%(Class)s'>
<td class='%(style)s'><div class='testcase'>%(desc)s</div></td>
<td colspan='5' align='center'>
<!--css div popup start-->
<a class="popup_link" onfocus='this.blur();' href="javascript:showTestDetail('div_%(tid)s')" >
%(status)s</a>
<div id='div_%(tid)s' class="popup_window">
<div style='text-align: right;cursor:pointer;font-size: large;font-weight: bold;'>
<a onfocus='this.blur();' onclick="document.getElementById('div_%(tid)s').style.display = 'none' " >
x</a>
</div>
<pre>
%(script)s
</pre>
</div>
<!--css div popup end-->
</td>
</tr>
""" # variables: (tid, Class, style, desc, status)
REPORT_TEST_NO_OUTPUT_TMPL = r"""
<tr id='%(tid)s' class='%(Class)s'>
<td class='%(style)s'><div class='testcase'>%(desc)s</div></td>
<td colspan='5' align='center'>%(status)s</td>
</tr>
""" # variables: (tid, Class, style, desc, status)
REPORT_TEST_OUTPUT_TMPL = r"""
%(id)s: %(output)s
""" # variables: (id, output)
# ------------------------------------------------------------------------
# ENDING
#
ENDING_TMPL = """<div id='ending'>&nbsp;</div>"""
# -------------------- The end of the Template class -------------------
TestResult = unittest.TestResult
class _TestResult(TestResult):
# note: _TestResult is a pure representation of results.
# It lacks the output and reporting ability compared to unittest._TextTestResult.
def __init__(self, verbosity=1):
TestResult.__init__(self)
self.outputBuffer = io.BytesIO()#six.StringIO()
self.stdout0 = None
self.stderr0 = None
self.success_count = 0
self.failure_count = 0
self.error_count = 0
self.verbosity = verbosity
# result is a list of results, each a 4-tuple:
# (
# result code (0: success; 1: fail; 2: error),
# TestCase object,
# Test output (byte string),
# stack trace,
# )
self.result = []
def startTest(self, test):
TestResult.startTest(self, test)
# just one buffer for both stdout and stderr
stdout_redirector.fp = self.outputBuffer
stderr_redirector.fp = self.outputBuffer
self.stdout0 = sys.stdout
self.stderr0 = sys.stderr
sys.stdout = stdout_redirector
sys.stderr = stderr_redirector
def complete_output(self):
"""
Disconnect output redirection and return buffer.
Safe to call multiple times.
"""
if self.stdout0:
sys.stdout = self.stdout0
sys.stderr = self.stderr0
self.stdout0 = None
self.stderr0 = None
return self.outputBuffer.getvalue()
def stopTest(self, test):
# Usually one of addSuccess, addError or addFailure would have been called.
# But there are some paths in unittest that would bypass this.
# We must disconnect stdout in stopTest(), which is guaranteed to be called.
self.complete_output()
def addSuccess(self, test):
self.success_count += 1
TestResult.addSuccess(self, test)
output = self.complete_output()
self.result.append((0, test, output, ''))
if self.verbosity > 1:
sys.stderr.write('ok ')
sys.stderr.write(str(test))
sys.stderr.write('\n')
else:
sys.stderr.write('.')
def addError(self, test, err):
self.error_count += 1
TestResult.addError(self, test, err)
_, _exc_str = self.errors[-1]
output = self.complete_output()
self.result.append((2, test, output, _exc_str))
if self.verbosity > 1:
sys.stderr.write('E ')
sys.stderr.write(str(test))
sys.stderr.write('\n')
else:
sys.stderr.write('E')
def addFailure(self, test, err):
self.failure_count += 1
TestResult.addFailure(self, test, err)
_, _exc_str = self.failures[-1]
output = self.complete_output()
self.result.append((1, test, output, _exc_str))
if self.verbosity > 1:
sys.stderr.write('F ')
sys.stderr.write(str(test))
sys.stderr.write('\n')
else:
sys.stderr.write('F')
class HTMLTestRunner(Template_mixin):
"""
"""
def __init__(self, stream=sys.stdout, verbosity=1, title=None, description=None):
self.stream = stream
self.verbosity = verbosity
if title is None:
self.title = self.DEFAULT_TITLE
else:
self.title = title
if description is None:
self.description = self.DEFAULT_DESCRIPTION
else:
self.description = description
self.startTime = datetime.datetime.now()
def run(self, test):
"Run the given test case or test suite."
result = _TestResult(self.verbosity)
test(result)
self.stopTime = datetime.datetime.now()
self.generateReport(test, result)
# print('\nTime Elapsed: %s' % (self.stopTime-self.startTime), file=sys.stderr)
return result
def sortResult(self, result_list):
# unittest does not seem to run tests in any particular order.
# Here at least we want to group them together by class.
rmap = {}
classes = []
for n,t,o,e in result_list:
cls = t.__class__
if not cls in rmap:
rmap[cls] = []
classes.append(cls)
rmap[cls].append((n,t,o,e))
r = [(cls, rmap[cls]) for cls in classes]
return r
def getReportAttributes(self, result):
"""
Return report attributes as a list of (name, value).
Override this to add custom attributes.
"""
startTime = str(self.startTime)[:19]
duration = str(self.stopTime - self.startTime)
status = []
if result.success_count: status.append('Pass %s' % result.success_count)
if result.failure_count: status.append('Failure %s' % result.failure_count)
if result.error_count: status.append('Error %s' % result.error_count )
if status:
status = ' '.join(status)
else:
status = 'none'
return [
('Start Time', startTime),
('Duration', duration),
('Status', status),
]
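# A minimal sketch (hypothetical subclass and attribute value) of the
# override suggested by the docstring above:
#
#   class MyHTMLTestRunner(HTMLTestRunner):
#       def getReportAttributes(self, result):
#           attrs = HTMLTestRunner.getReportAttributes(self, result)
#           attrs.append(('Hostname', 'build-host-01'))  # assumed value
#           return attrs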
def generateReport(self, test, result):
report_attrs = self.getReportAttributes(result)
generator = 'HTMLTestRunner %s' % __version__
stylesheet = self._generate_stylesheet()
heading = self._generate_heading(report_attrs)
report = self._generate_report(result)
ending = self._generate_ending()
output = self.HTML_TMPL % dict(
title = saxutils.escape(self.title),
generator = generator,
stylesheet = stylesheet,
heading = heading,
report = report,
ending = ending,
)
self.stream.write(output) #.encode('utf8')
def _generate_stylesheet(self):
return self.STYLESHEET_TMPL
def _generate_heading(self, report_attrs):
a_lines = []
for name, value in report_attrs:
line = self.HEADING_ATTRIBUTE_TMPL % dict(
name = saxutils.escape(name),
value = saxutils.escape(value),
)
a_lines.append(line)
heading = self.HEADING_TMPL % dict(
title = saxutils.escape(self.title),
parameters = ''.join(a_lines),
description = saxutils.escape(self.description),
)
return heading
def _generate_report(self, result):
rows = []
sortedResult = self.sortResult(result.result)
for cid, (cls, cls_results) in enumerate(sortedResult):
# subtotal for a class
np = nf = ne = 0
for n,t,o,e in cls_results:
if n == 0: np += 1
elif n == 1: nf += 1
else: ne += 1
# format class description
if cls.__module__ == "__main__":
name = cls.__name__
else:
name = "%s.%s" % (cls.__module__, cls.__name__)
doc = cls.__doc__ and cls.__doc__.split("\n")[0] or ""
desc = doc and '%s: %s' % (name, doc) or name
row = self.REPORT_CLASS_TMPL % dict(
style = ne > 0 and 'errorClass' or nf > 0 and 'failClass' or 'passClass',
desc = desc,
count = np+nf+ne,
Pass = np,
fail = nf,
error = ne,
cid = 'c%s' % (cid+1),
)
rows.append(row)
for tid, (n,t,o,e) in enumerate(cls_results):
self._generate_report_test(rows, cid, tid, n, t, o, e)
report = self.REPORT_TMPL % dict(
test_list = ''.join(rows),
count = str(result.success_count+result.failure_count+result.error_count),
Pass = str(result.success_count),
fail = str(result.failure_count),
error = str(result.error_count),
)
return report
def _generate_report_test(self, rows, cid, tid, n, t, o, e):
# e.g. 'pt1.1', 'ft1.1', etc
has_output = bool(o or e)
tid = (n == 0 and 'p' or 'f') + 't%s.%s' % (cid+1,tid+1)
name = t.id().split('.')[-1]
doc = t.shortDescription() or ""
desc = doc and ('%s: %s' % (name, doc)) or name
tmpl = has_output and self.REPORT_TEST_WITH_OUTPUT_TMPL or self.REPORT_TEST_NO_OUTPUT_TMPL
# o and e should be byte strings because they are collected from stdout and stderr?
if isinstance(o,str) and hasattr(o, 'decode'):
# TODO: some problem with 'string_escape': it escapes \n and messes up formatting
# uo = unicode(o.encode('string_escape'))
uo = o.decode('latin-1')
else:
uo = o
if isinstance(e,str) and hasattr(e, 'decode'):
# TODO: some problem with 'string_escape': it escapes \n and messes up formatting
# ue = unicode(e.encode('string_escape'))
ue = e.decode('latin-1')
else:
ue = e
script = self.REPORT_TEST_OUTPUT_TMPL % dict(
id = tid,
output = saxutils.escape(uo+ue),
)
row = tmpl % dict(
tid = tid,
Class = (n == 0 and 'hiddenRow' or 'none'),
style = n == 2 and 'errorCase' or (n == 1 and 'failCase' or 'none'),
desc = desc,
script = script,
status = self.STATUS[n],
)
rows.append(row)
if not has_output:
return
def _generate_ending(self):
return self.ENDING_TMPL
##############################################################################
# Facilities for running tests from the command line
##############################################################################
# Note: Reuse unittest.TestProgram to launch test. In the future we may
# build our own launcher to support more specific command line
# parameters like test title, CSS, etc.
class TestProgram(unittest.TestProgram):
"""
A variation of the unittest.TestProgram. Please refer to the base
class for command line parameters.
"""
def runTests(self):
# Pick HTMLTestRunner as the default test runner.
# base class's testRunner parameter is not useful because it means
# we have to instantiate HTMLTestRunner before we know self.verbosity.
if self.testRunner is None:
self.testRunner = HTMLTestRunner(verbosity=self.verbosity)
unittest.TestProgram.runTests(self)
main = TestProgram
##############################################################################
# Executing this module from the command line
##############################################################################
if __name__ == "__main__":
main(module=None)
|
brennie/reviewboard
|
refs/heads/master
|
reviewboard/attachments/evolutions/file_attachment_orig_filename.py
|
10
|
from __future__ import unicode_literals
from django_evolution.mutations import AddField
from django.db import models
MUTATIONS = [
AddField('FileAttachment', 'orig_filename', models.CharField,
max_length=256, null=True)
]
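# Usage note (hedged): django_evolution applies the mutations listed in
# MUTATIONS in order when the evolution runs; this one adds a nullable
# CharField named orig_filename (max 256 chars) to the existing
# FileAttachment model.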
|
nishad-jobsglobal/odoo-marriot
|
refs/heads/master
|
addons/hr_payroll_account/__openerp__.py
|
260
|
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Payroll Accounting',
'version': '1.0',
'category': 'Human Resources',
'description': """
Generic Payroll system Integrated with Accounting.
==================================================
* Expense Encoding
* Payment Encoding
* Company Contribution Management
""",
'author':'OpenERP SA',
'website': 'https://www.odoo.com/page/employees',
'depends': [
'hr_payroll',
'account',
'hr_expense'
],
'data': ['hr_payroll_account_view.xml'],
'demo': ['hr_payroll_account_demo.xml'],
'test': ['test/hr_payroll_account.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
anant-dev/django
|
refs/heads/master
|
tests/custom_pk/tests.py
|
326
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import IntegrityError, transaction
from django.test import TestCase, skipIfDBFeature
from django.utils import six
from .models import Bar, Business, Employee, Foo
class BasicCustomPKTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.dan = Employee.objects.create(
employee_code=123, first_name="Dan", last_name="Jones",
)
cls.fran = Employee.objects.create(
employee_code=456, first_name="Fran", last_name="Bones",
)
cls.business = Business.objects.create(name="Sears")
cls.business.employees.add(cls.dan, cls.fran)
def test_querysets(self):
"""
Both pk and custom attribute_name can be used in filter and friends
"""
self.assertQuerysetEqual(
Employee.objects.filter(pk=123), [
"Dan Jones",
],
six.text_type
)
self.assertQuerysetEqual(
Employee.objects.filter(employee_code=123), [
"Dan Jones",
],
six.text_type
)
self.assertQuerysetEqual(
Employee.objects.filter(pk__in=[123, 456]), [
"Fran Bones",
"Dan Jones",
],
six.text_type
)
self.assertQuerysetEqual(
Employee.objects.all(), [
"Fran Bones",
"Dan Jones",
],
six.text_type
)
self.assertQuerysetEqual(
Business.objects.filter(name="Sears"), [
"Sears"
],
lambda b: b.name
)
self.assertQuerysetEqual(
Business.objects.filter(pk="Sears"), [
"Sears",
],
lambda b: b.name
)
def test_querysets_related_name(self):
"""
Custom pk doesn't affect related_name based lookups
"""
self.assertQuerysetEqual(
self.business.employees.all(), [
"Fran Bones",
"Dan Jones",
],
six.text_type
)
self.assertQuerysetEqual(
self.fran.business_set.all(), [
"Sears",
],
lambda b: b.name
)
def test_querysets_relational(self):
"""
Queries across tables, involving primary key
"""
self.assertQuerysetEqual(
Employee.objects.filter(business__name="Sears"), [
"Fran Bones",
"Dan Jones",
],
six.text_type,
)
self.assertQuerysetEqual(
Employee.objects.filter(business__pk="Sears"), [
"Fran Bones",
"Dan Jones",
],
six.text_type,
)
self.assertQuerysetEqual(
Business.objects.filter(employees__employee_code=123), [
"Sears",
],
lambda b: b.name
)
self.assertQuerysetEqual(
Business.objects.filter(employees__pk=123), [
"Sears",
],
lambda b: b.name,
)
self.assertQuerysetEqual(
Business.objects.filter(employees__first_name__startswith="Fran"), [
"Sears",
],
lambda b: b.name
)
def test_get(self):
"""
Get can accept pk or the real attribute name
"""
self.assertEqual(Employee.objects.get(pk=123), self.dan)
self.assertEqual(Employee.objects.get(pk=456), self.fran)
self.assertRaises(
Employee.DoesNotExist,
lambda: Employee.objects.get(pk=42)
)
# Use the name of the primary key, rather than pk.
self.assertEqual(Employee.objects.get(employee_code=123), self.dan)
def test_pk_attributes(self):
"""
pk and attribute name are available on the model
No default id attribute is added
"""
# pk can be used as a substitute for the primary key.
# The primary key can be accessed via the pk property on the model.
e = Employee.objects.get(pk=123)
self.assertEqual(e.pk, 123)
# Or we can use the real attribute name for the primary key:
self.assertEqual(e.employee_code, 123)
self.assertRaises(AttributeError, lambda: e.id)
def test_in_bulk(self):
"""
Custom pks work with in_bulk, both for integer and non-integer types
"""
emps = Employee.objects.in_bulk([123, 456])
self.assertEqual(emps[123], self.dan)
self.assertEqual(Business.objects.in_bulk(["Sears"]), {
"Sears": self.business,
})
def test_save(self):
"""
custom pks do not affect save
"""
fran = Employee.objects.get(pk=456)
fran.last_name = "Jones"
fran.save()
self.assertQuerysetEqual(
Employee.objects.filter(last_name="Jones"), [
"Dan Jones",
"Fran Jones",
],
six.text_type
)
class CustomPKTests(TestCase):
def test_custom_pk_create(self):
"""
New objects can be created both with pk and the custom name
"""
Employee.objects.create(employee_code=1234, first_name="Foo", last_name="Bar")
Employee.objects.create(pk=1235, first_name="Foo", last_name="Baz")
Business.objects.create(name="Bears")
Business.objects.create(pk="Tears")
def test_unicode_pk(self):
# Primary key may be unicode string
Business.objects.create(name='jaźń')
def test_unique_pk(self):
# The primary key must also obviously be unique, so trying to create a
# new object with the same primary key will fail.
Employee.objects.create(
employee_code=123, first_name="Frank", last_name="Jones"
)
with self.assertRaises(IntegrityError):
with transaction.atomic():
Employee.objects.create(employee_code=123, first_name="Fred", last_name="Jones")
def test_zero_non_autoincrement_pk(self):
Employee.objects.create(
employee_code=0, first_name="Frank", last_name="Jones"
)
employee = Employee.objects.get(pk=0)
self.assertEqual(employee.employee_code, 0)
def test_custom_field_pk(self):
# Regression for #10785 -- Custom fields can be used for primary keys.
new_bar = Bar.objects.create()
new_foo = Foo.objects.create(bar=new_bar)
f = Foo.objects.get(bar=new_bar.pk)
self.assertEqual(f, new_foo)
self.assertEqual(f.bar, new_bar)
f = Foo.objects.get(bar=new_bar)
self.assertEqual(f, new_foo)
self.assertEqual(f.bar, new_bar)
# SQLite lets objects be saved with an empty primary key, even though an
# integer is expected. So we can't check for an error being raised in that
# case for SQLite. Remove it from the suite for this next bit.
@skipIfDBFeature('supports_unspecified_pk')
def test_required_pk(self):
# The primary key must be specified, so an error is raised if you
# try to create an object without it.
with self.assertRaises(IntegrityError):
with transaction.atomic():
Employee.objects.create(first_name="Tom", last_name="Smith")
|
rmcgibbo/numpy
|
refs/heads/master
|
numpy/lib/utils.py
|
15
|
from __future__ import division, absolute_import, print_function
import os
import sys
import types
import re
import warnings
from numpy.core.numerictypes import issubclass_, issubsctype, issubdtype
from numpy.core import ndarray, ufunc, asarray
__all__ = [
'issubclass_', 'issubsctype', 'issubdtype', 'deprecate',
'deprecate_with_doc', 'get_include', 'info', 'source', 'who',
'lookfor', 'byte_bounds', 'safe_eval'
]
def get_include():
"""
Return the directory that contains the NumPy \\*.h header files.
Extension modules that need to compile against NumPy should use this
function to locate the appropriate include directory.
Notes
-----
When using ``distutils``, for example in ``setup.py``.
::
import numpy as np
...
Extension('extension_name', ...
include_dirs=[np.get_include()])
...
"""
import numpy
if numpy.show_config is None:
# running from numpy source directory
d = os.path.join(os.path.dirname(numpy.__file__), 'core', 'include')
else:
# using installed numpy core headers
import numpy.core as core
d = os.path.join(os.path.dirname(core.__file__), 'include')
return d
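# A minimal sketch (assumed package and file names) of the setup.py pattern
# the get_include docstring above refers to:
#
#   from distutils.core import setup, Extension
#   import numpy as np
#
#   setup(name='mypkg',
#         ext_modules=[Extension('mypkg._ext', ['src/_ext.c'],
#                                include_dirs=[np.get_include()])])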
def _set_function_name(func, name):
func.__name__ = name
return func
class _Deprecate(object):
"""
Decorator class to deprecate old functions.
Refer to `deprecate` for details.
See Also
--------
deprecate
"""
def __init__(self, old_name=None, new_name=None, message=None):
self.old_name = old_name
self.new_name = new_name
self.message = message
def __call__(self, func, *args, **kwargs):
"""
Decorator call. Refer to ``deprecate``.
"""
old_name = self.old_name
new_name = self.new_name
message = self.message
import warnings
if old_name is None:
try:
old_name = func.__name__
except AttributeError:
old_name = func.__name__
if new_name is None:
depdoc = "`%s` is deprecated!" % old_name
else:
depdoc = "`%s` is deprecated, use `%s` instead!" % \
(old_name, new_name)
if message is not None:
depdoc += "\n" + message
def newfunc(*args,**kwds):
"""`arrayrange` is deprecated, use `arange` instead!"""
warnings.warn(depdoc, DeprecationWarning)
return func(*args, **kwds)
newfunc = _set_function_name(newfunc, old_name)
doc = func.__doc__
if doc is None:
doc = depdoc
else:
doc = '\n\n'.join([depdoc, doc])
newfunc.__doc__ = doc
try:
d = func.__dict__
except AttributeError:
pass
else:
newfunc.__dict__.update(d)
return newfunc
def deprecate(*args, **kwargs):
"""
Issues a DeprecationWarning, adds warning to `old_name`'s
docstring, rebinds ``old_name.__name__`` and returns the new
function object.
This function may also be used as a decorator.
Parameters
----------
func : function
The function to be deprecated.
old_name : str, optional
The name of the function to be deprecated. Default is None, in
which case the name of `func` is used.
new_name : str, optional
The new name for the function. Default is None, in which case the
deprecation message is that `old_name` is deprecated. If given, the
deprecation message is that `old_name` is deprecated and `new_name`
should be used instead.
message : str, optional
Additional explanation of the deprecation. Displayed in the
docstring after the warning.
Returns
-------
old_func : function
The deprecated function.
Examples
--------
Note that ``olduint`` returns a value after printing Deprecation
Warning:
>>> olduint = np.deprecate(np.uint)
>>> olduint(6)
/usr/lib/python2.5/site-packages/numpy/lib/utils.py:114:
DeprecationWarning: uint32 is deprecated
warnings.warn(str1, DeprecationWarning)
6
"""
# Deprecate may be run as a function or as a decorator
# If run as a function, we initialise the decorator class
# and execute its __call__ method.
if args:
fn = args[0]
args = args[1:]
# backward compatibility -- can be removed
# after next release
if 'newname' in kwargs:
kwargs['new_name'] = kwargs.pop('newname')
if 'oldname' in kwargs:
kwargs['old_name'] = kwargs.pop('oldname')
return _Deprecate(*args, **kwargs)(fn)
else:
return _Deprecate(*args, **kwargs)
deprecate_with_doc = lambda msg: _Deprecate(message=msg)
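# A minimal sketch (hypothetical names) of the decorator form mentioned in
# the `deprecate` docstring above:
#
#   @deprecate(new_name='new_func', message='See the migration notes.')
#   def old_func(x):
#       return x + 1
#
# Calling old_func(1) then issues a DeprecationWarning and returns 2.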
#--------------------------------------------
# Determine if two arrays can share memory
#--------------------------------------------
def byte_bounds(a):
"""
Returns pointers to the end-points of an array.
Parameters
----------
a : ndarray
Input array. It must conform to the Python-side of the array
interface.
Returns
-------
(low, high) : tuple of 2 integers
The first integer is the first byte of the array, the second
integer is just past the last byte of the array. If `a` is not
contiguous it will not use every byte between the (`low`, `high`)
values.
Examples
--------
>>> I = np.eye(2, dtype='f'); I.dtype
dtype('float32')
>>> low, high = np.byte_bounds(I)
>>> high - low == I.size*I.itemsize
True
>>> I = np.eye(2, dtype='G'); I.dtype
dtype('complex192')
>>> low, high = np.byte_bounds(I)
>>> high - low == I.size*I.itemsize
True
"""
ai = a.__array_interface__
a_data = ai['data'][0]
astrides = ai['strides']
ashape = ai['shape']
bytes_a = asarray(a).dtype.itemsize
a_low = a_high = a_data
if astrides is None:
# contiguous case
a_high += a.size * bytes_a
else:
for shape, stride in zip(ashape, astrides):
if stride < 0:
a_low += (shape-1)*stride
else:
a_high += (shape-1)*stride
a_high += bytes_a
return a_low, a_high
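# A minimal sketch (not part of this module) of the overlap test alluded to
# by the section comment above, built on byte_bounds:
#
#   def _may_share_memory(a, b):
#       a_low, a_high = byte_bounds(a)
#       b_low, b_high = byte_bounds(b)
#       return not (a_high <= b_low or b_high <= a_low)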
#-----------------------------------------------------------------------------
# Function for output and information on the variables used.
#-----------------------------------------------------------------------------
def who(vardict=None):
"""
Print the Numpy arrays in the given dictionary.
If there is no dictionary passed in or `vardict` is None then it prints
the Numpy arrays in the globals() dictionary (all Numpy arrays in the
namespace).
Parameters
----------
vardict : dict, optional
A dictionary possibly containing ndarrays. Default is globals().
Returns
-------
out : None
Returns 'None'.
Notes
-----
Prints out the name, shape, bytes and type of all of the ndarrays
present in `vardict`.
Examples
--------
>>> a = np.arange(10)
>>> b = np.ones(20)
>>> np.who()
Name Shape Bytes Type
===========================================================
a 10 40 int32
b 20 160 float64
Upper bound on total bytes = 200
>>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str',
... 'idx':5}
>>> np.who(d)
Name Shape Bytes Type
===========================================================
y 3 24 float64
x 2 16 float64
Upper bound on total bytes = 40
"""
if vardict is None:
frame = sys._getframe().f_back
vardict = frame.f_globals
sta = []
cache = {}
for name in vardict.keys():
if isinstance(vardict[name], ndarray):
var = vardict[name]
idv = id(var)
if idv in cache.keys():
namestr = name + " (%s)" % cache[idv]
original = 0
else:
cache[idv] = name
namestr = name
original = 1
shapestr = " x ".join(map(str, var.shape))
bytestr = str(var.nbytes)
sta.append([namestr, shapestr, bytestr, var.dtype.name,
original])
maxname = 0
maxshape = 0
maxbyte = 0
totalbytes = 0
for k in range(len(sta)):
val = sta[k]
if maxname < len(val[0]):
maxname = len(val[0])
if maxshape < len(val[1]):
maxshape = len(val[1])
if maxbyte < len(val[2]):
maxbyte = len(val[2])
if val[4]:
totalbytes += int(val[2])
if len(sta) > 0:
sp1 = max(10, maxname)
sp2 = max(10, maxshape)
sp3 = max(10, maxbyte)
prval = "Name %s Shape %s Bytes %s Type" % (sp1*' ', sp2*' ', sp3*' ')
print(prval + "\n" + "="*(len(prval)+5) + "\n")
for k in range(len(sta)):
val = sta[k]
print("%s %s %s %s %s %s %s" % (val[0], ' '*(sp1-len(val[0])+4),
val[1], ' '*(sp2-len(val[1])+5),
val[2], ' '*(sp3-len(val[2])+5),
val[3]))
print("\nUpper bound on total bytes = %d" % totalbytes)
return
#-----------------------------------------------------------------------------
# NOTE: pydoc defines a help function which works similarly to this
# except it uses a pager to take over the screen.
# combine name and arguments and split to multiple lines of width
# characters. End lines on a comma and begin argument list indented with
# the rest of the arguments.
def _split_line(name, arguments, width):
firstwidth = len(name)
k = firstwidth
newstr = name
sepstr = ", "
arglist = arguments.split(sepstr)
for argument in arglist:
if k == firstwidth:
addstr = ""
else:
addstr = sepstr
k = k + len(argument) + len(addstr)
if k > width:
k = firstwidth + 1 + len(argument)
newstr = newstr + ",\n" + " "*(firstwidth+2) + argument
else:
newstr = newstr + addstr + argument
return newstr
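# Example (assumed inputs): _split_line("fn", "(a, b, c)", 8) yields
# "fn(a, b," followed by a continuation line indented len(name)+2 spaces
# containing "c)", since the final argument would overflow the width.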
_namedict = None
_dictlist = None
# Traverse all module directories underneath globals
# to see if something is defined
def _makenamedict(module='numpy'):
module = __import__(module, globals(), locals(), [])
thedict = {module.__name__:module.__dict__}
dictlist = [module.__name__]
totraverse = [module.__dict__]
while True:
if len(totraverse) == 0:
break
thisdict = totraverse.pop(0)
for x in thisdict.keys():
if isinstance(thisdict[x], types.ModuleType):
modname = thisdict[x].__name__
if modname not in dictlist:
moddict = thisdict[x].__dict__
dictlist.append(modname)
totraverse.append(moddict)
thedict[modname] = moddict
return thedict, dictlist
def _info(obj, output=sys.stdout):
"""Provide information about ndarray obj.
Parameters
----------
obj: ndarray
Must be ndarray, not checked.
output:
Where printed output goes.
Notes
-----
Copied over from the numarray module prior to its removal.
Adapted somewhat as only numpy is an option now.
Called by info.
"""
extra = ""
tic = ""
bp = lambda x: x
cls = getattr(obj, '__class__', type(obj))
nm = getattr(cls, '__name__', cls)
strides = obj.strides
endian = obj.dtype.byteorder
print("class: ", nm, file=output)
print("shape: ", obj.shape, file=output)
print("strides: ", strides, file=output)
print("itemsize: ", obj.itemsize, file=output)
print("aligned: ", bp(obj.flags.aligned), file=output)
print("contiguous: ", bp(obj.flags.contiguous), file=output)
print("fortran: ", obj.flags.fortran, file=output)
print(
"data pointer: %s%s" % (hex(obj.ctypes._as_parameter_.value), extra),
file=output
)
print("byteorder: ", end=' ', file=output)
if endian in ['|', '=']:
print("%s%s%s" % (tic, sys.byteorder, tic), file=output)
byteswap = False
elif endian == '>':
print("%sbig%s" % (tic, tic), file=output)
byteswap = sys.byteorder != "big"
else:
print("%slittle%s" % (tic, tic), file=output)
byteswap = sys.byteorder != "little"
print("byteswap: ", bp(byteswap), file=output)
print("type: %s" % obj.dtype, file=output)
def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'):
"""
Get help information for a function, class, or module.
Parameters
----------
object : object or str, optional
Input object or name to get information about. If `object` is a
numpy object, its docstring is given. If it is a string, available
modules are searched for matching objects. If None, information
about `info` itself is returned.
maxwidth : int, optional
Printing width.
output : file like object, optional
File like object that the output is written to, default is
``stdout``. The object has to be opened in 'w' or 'a' mode.
toplevel : str, optional
Start search at this level.
See Also
--------
source, lookfor
Notes
-----
When used interactively with an object, ``np.info(obj)`` is equivalent
to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython
prompt.
Examples
--------
>>> np.info(np.polyval) # doctest: +SKIP
polyval(p, x)
Evaluate the polynomial p at x.
...
When using a string for `object` it is possible to get multiple results.
>>> np.info('fft') # doctest: +SKIP
*** Found in numpy ***
Core FFT routines
...
*** Found in numpy.fft ***
fft(a, n=None, axis=-1)
...
*** Repeat reference found in numpy.fft.fftpack ***
*** Total of 3 references found. ***
"""
global _namedict, _dictlist
# Local import to speed up numpy's import time.
import pydoc
import inspect
if (hasattr(object, '_ppimport_importer') or
hasattr(object, '_ppimport_module')):
object = object._ppimport_module
elif hasattr(object, '_ppimport_attr'):
object = object._ppimport_attr
if object is None:
info(info)
elif isinstance(object, ndarray):
_info(object, output=output)
elif isinstance(object, str):
if _namedict is None:
_namedict, _dictlist = _makenamedict(toplevel)
numfound = 0
objlist = []
for namestr in _dictlist:
try:
obj = _namedict[namestr][object]
if id(obj) in objlist:
print("\n "
"*** Repeat reference found in %s *** " % namestr,
file=output
)
else:
objlist.append(id(obj))
print(" *** Found in %s ***" % namestr, file=output)
info(obj)
print("-"*maxwidth, file=output)
numfound += 1
except KeyError:
pass
if numfound == 0:
print("Help for %s not found." % object, file=output)
else:
print("\n "
"*** Total of %d references found. ***" % numfound,
file=output
)
elif inspect.isfunction(object):
name = object.__name__
arguments = inspect.formatargspec(*inspect.getargspec(object))
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print(" " + argstr + "\n", file=output)
print(inspect.getdoc(object), file=output)
elif inspect.isclass(object):
name = object.__name__
arguments = "()"
try:
if hasattr(object, '__init__'):
arguments = inspect.formatargspec(
*inspect.getargspec(object.__init__.__func__)
)
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
arguments = ", ".join(arglist[1:])
except:
pass
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print(" " + argstr + "\n", file=output)
doc1 = inspect.getdoc(object)
if doc1 is None:
if hasattr(object, '__init__'):
print(inspect.getdoc(object.__init__), file=output)
else:
print(inspect.getdoc(object), file=output)
methods = pydoc.allmethods(object)
if methods != []:
print("\n\nMethods:\n", file=output)
for meth in methods:
if meth[0] == '_':
continue
thisobj = getattr(object, meth, None)
if thisobj is not None:
methstr, other = pydoc.splitdoc(
inspect.getdoc(thisobj) or "None"
)
print(" %s -- %s" % (meth, methstr), file=output)
elif (sys.version_info[0] < 3
and isinstance(object, types.InstanceType)):
# check for __call__ method
# types.InstanceType is the type of the instances of oldstyle classes
print("Instance of class: ", object.__class__.__name__, file=output)
print(file=output)
if hasattr(object, '__call__'):
arguments = inspect.formatargspec(
*inspect.getargspec(object.__call__.__func__)
)
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
arguments = ", ".join(arglist[1:])
else:
arguments = "()"
if hasattr(object, 'name'):
name = "%s" % object.name
else:
name = "<name>"
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print(" " + argstr + "\n", file=output)
doc = inspect.getdoc(object.__call__)
if doc is not None:
print(inspect.getdoc(object.__call__), file=output)
print(inspect.getdoc(object), file=output)
else:
print(inspect.getdoc(object), file=output)
elif inspect.ismethod(object):
name = object.__name__
arguments = inspect.formatargspec(
*inspect.getargspec(object.__func__)
)
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
arguments = ", ".join(arglist[1:])
else:
arguments = "()"
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print(" " + argstr + "\n", file=output)
print(inspect.getdoc(object), file=output)
elif hasattr(object, '__doc__'):
print(inspect.getdoc(object), file=output)
def source(object, output=sys.stdout):
"""
Print or write to a file the source code for a Numpy object.
The source code is only returned for objects written in Python. Many
functions and classes are defined in C and will therefore not return
useful information.
Parameters
----------
object : numpy object
Input object. This can be any object (function, class, module,
...).
output : file object, optional
If `output` not supplied then source code is printed to screen
(sys.stdout). File object must be created with either write 'w' or
append 'a' modes.
See Also
--------
lookfor, info
Examples
--------
>>> np.source(np.interp) #doctest: +SKIP
In file: /usr/lib/python2.6/dist-packages/numpy/lib/function_base.py
def interp(x, xp, fp, left=None, right=None):
\"\"\".... (full docstring printed)\"\"\"
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
The source code is only returned for objects written in Python.
>>> np.source(np.array) #doctest: +SKIP
Not available for this object.
"""
# Local import to speed up numpy's import time.
import inspect
try:
print("In file: %s\n" % inspect.getsourcefile(object), file=output)
print(inspect.getsource(object), file=output)
except:
print("Not available for this object.", file=output)
# Cache for lookfor: {id(module): {name: (docstring, kind, index), ...}...}
# where kind: "func", "class", "module", "object"
# and index: index in breadth-first namespace traversal
_lookfor_caches = {}
# regexp whose match indicates that the string may contain a function
# signature
_function_signature_re = re.compile(r"[a-z0-9_]+\(.*[,=].*\)", re.I)
def lookfor(what, module=None, import_modules=True, regenerate=False,
output=None):
"""
Do a keyword search on docstrings.
A list of objects that matched the search is displayed,
sorted by relevance. All given keywords need to be found in the
docstring for it to be returned as a result, but the order does
not matter.
Parameters
----------
what : str
String containing words to look for.
module : str or list, optional
Name of module(s) whose docstrings to go through.
import_modules : bool, optional
Whether to import sub-modules in packages. Default is True.
regenerate : bool, optional
Whether to re-generate the docstring cache. Default is False.
output : file-like, optional
File-like object to write the output to. If omitted, use a pager.
See Also
--------
source, info
Notes
-----
Relevance is determined only roughly, by checking if the keywords occur
in the function name, at the start of a docstring, etc.
Examples
--------
>>> np.lookfor('binary representation')
Search results for 'binary representation'
------------------------------------------
numpy.binary_repr
Return the binary representation of the input number as a string.
numpy.core.setup_common.long_double_representation
Given a binary dump as given by GNU od -b, look for long double
numpy.base_repr
Return a string representation of a number in the given base system.
...
"""
import pydoc
# Cache
cache = _lookfor_generate_cache(module, import_modules, regenerate)
# Search
# XXX: maybe using a real stemming search engine would be better?
found = []
whats = str(what).lower().split()
if not whats:
return
for name, (docstring, kind, index) in cache.items():
if kind in ('module', 'object'):
# don't show modules or objects
continue
ok = True
doc = docstring.lower()
for w in whats:
if w not in doc:
ok = False
break
if ok:
found.append(name)
# Relevance sort
# XXX: this is full Harrison-Stetson heuristics now,
# XXX: it probably could be improved
kind_relevance = {'func': 1000, 'class': 1000,
'module': -1000, 'object': -1000}
def relevance(name, docstr, kind, index):
r = 0
# do the keywords occur within the start of the docstring?
first_doc = "\n".join(docstr.lower().strip().split("\n")[:3])
r += sum([200 for w in whats if w in first_doc])
# do the keywords occur in the function name?
r += sum([30 for w in whats if w in name])
# is the full name long?
r += -len(name) * 5
# is the object of bad type?
r += kind_relevance.get(kind, -1000)
# is the object deep in namespace hierarchy?
r += -name.count('.') * 10
r += max(-index / 100, -100)
return r
def relevance_value(a):
return relevance(a, *cache[a])
found.sort(key=relevance_value)
# Pretty-print
s = "Search results for '%s'" % (' '.join(whats))
help_text = [s, "-"*len(s)]
for name in found[::-1]:
doc, kind, ix = cache[name]
doclines = [line.strip() for line in doc.strip().split("\n")
if line.strip()]
# find a suitable short description
try:
first_doc = doclines[0].strip()
if _function_signature_re.search(first_doc):
first_doc = doclines[1].strip()
except IndexError:
first_doc = ""
help_text.append("%s\n %s" % (name, first_doc))
if not found:
help_text.append("Nothing found.")
# Output
if output is not None:
output.write("\n".join(help_text))
elif len(help_text) > 10:
pager = pydoc.getpager()
pager("\n".join(help_text))
else:
print("\n".join(help_text))
def _lookfor_generate_cache(module, import_modules, regenerate):
"""
Generate docstring cache for given module.
Parameters
----------
module : str, None, module
Module for which to generate docstring cache
import_modules : bool
Whether to import sub-modules in packages.
regenerate : bool
Re-generate the docstring cache
Returns
-------
cache : dict {obj_full_name: (docstring, kind, index), ...}
Docstring cache for the module, either cached one (regenerate=False)
or newly generated.
"""
global _lookfor_caches
# Local import to speed up numpy's import time.
import inspect
if sys.version_info[0] >= 3:
# In Python3 stderr, stdout are text files.
from io import StringIO
else:
from StringIO import StringIO
if module is None:
module = "numpy"
if isinstance(module, str):
try:
__import__(module)
except ImportError:
return {}
module = sys.modules[module]
elif isinstance(module, list) or isinstance(module, tuple):
cache = {}
for mod in module:
cache.update(_lookfor_generate_cache(mod, import_modules,
regenerate))
return cache
if id(module) in _lookfor_caches and not regenerate:
return _lookfor_caches[id(module)]
# walk items and collect docstrings
cache = {}
_lookfor_caches[id(module)] = cache
seen = {}
index = 0
stack = [(module.__name__, module)]
while stack:
name, item = stack.pop(0)
if id(item) in seen:
continue
seen[id(item)] = True
index += 1
kind = "object"
if inspect.ismodule(item):
kind = "module"
try:
_all = item.__all__
except AttributeError:
_all = None
# import sub-packages
if import_modules and hasattr(item, '__path__'):
for pth in item.__path__:
for mod_path in os.listdir(pth):
this_py = os.path.join(pth, mod_path)
init_py = os.path.join(pth, mod_path, '__init__.py')
if (os.path.isfile(this_py) and
mod_path.endswith('.py')):
to_import = mod_path[:-3]
elif os.path.isfile(init_py):
to_import = mod_path
else:
continue
if to_import == '__init__':
continue
try:
# Catch SystemExit, too
base_exc = BaseException
except NameError:
# Python 2.4 doesn't have BaseException
base_exc = Exception
try:
old_stdout = sys.stdout
old_stderr = sys.stderr
try:
sys.stdout = StringIO()
sys.stderr = StringIO()
__import__("%s.%s" % (name, to_import))
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
except base_exc:
continue
for n, v in _getmembers(item):
try:
item_name = getattr(v, '__name__', "%s.%s" % (name, n))
mod_name = getattr(v, '__module__', None)
except NameError:
# ref. SWIG's global cvars
# NameError: Unknown C global variable
item_name = "%s.%s" % (name, n)
mod_name = None
if '.' not in item_name and mod_name:
item_name = "%s.%s" % (mod_name, item_name)
if not item_name.startswith(name + '.'):
# don't crawl "foreign" objects
if isinstance(v, ufunc):
# ... unless they are ufuncs
pass
else:
continue
elif not (inspect.ismodule(v) or _all is None or n in _all):
continue
stack.append(("%s.%s" % (name, n), v))
elif inspect.isclass(item):
kind = "class"
for n, v in _getmembers(item):
stack.append(("%s.%s" % (name, n), v))
elif hasattr(item, "__call__"):
kind = "func"
try:
doc = inspect.getdoc(item)
except NameError:
# ref SWIG's NameError: Unknown C global variable
doc = None
if doc is not None:
cache[name] = (doc, kind, index)
return cache
def _getmembers(item):
import inspect
try:
members = inspect.getmembers(item)
except AttributeError:
members = [(x, getattr(item, x)) for x in dir(item)
if hasattr(item, x)]
return members
#-----------------------------------------------------------------------------
# The following SafeEval class and company are adapted from Michael Spencer's
# ASPN Python Cookbook recipe:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/364469
# Accordingly it is mostly Copyright 2006 by Michael Spencer.
# The recipe, like most of the other ASPN Python Cookbook recipes was made
# available under the Python license.
# http://www.python.org/license
# It has been modified to:
# * handle unary -/+
# * support True/False/None
# * raise SyntaxError instead of a custom exception.
class SafeEval(object):
"""
Object to evaluate constant string expressions.
This includes strings with lists, dicts and tuples using the abstract
syntax tree created by ``compiler.parse``.
.. deprecated:: 1.10.0
See Also
--------
safe_eval
"""
def __init__(self):
warnings.warn("SafeEval is deprecated in 1.10 and will be removed.",
DeprecationWarning)
def visit(self, node):
cls = node.__class__
meth = getattr(self, 'visit' + cls.__name__, self.default)
return meth(node)
def default(self, node):
raise SyntaxError("Unsupported source construct: %s"
% node.__class__)
def visitExpression(self, node):
return self.visit(node.body)
def visitNum(self, node):
return node.n
def visitStr(self, node):
return node.s
def visitBytes(self, node):
return node.s
    def visitDict(self, node, **kw):
return dict([(self.visit(k), self.visit(v))
for k, v in zip(node.keys, node.values)])
def visitTuple(self, node):
return tuple([self.visit(i) for i in node.elts])
def visitList(self, node):
return [self.visit(i) for i in node.elts]
def visitUnaryOp(self, node):
import ast
if isinstance(node.op, ast.UAdd):
return +self.visit(node.operand)
elif isinstance(node.op, ast.USub):
return -self.visit(node.operand)
else:
raise SyntaxError("Unknown unary op: %r" % node.op)
def visitName(self, node):
if node.id == 'False':
return False
elif node.id == 'True':
return True
elif node.id == 'None':
return None
else:
raise SyntaxError("Unknown name: %s" % node.id)
def visitNameConstant(self, node):
return node.value
def safe_eval(source):
"""
Protected string evaluation.
Evaluate a string containing a Python literal expression without
allowing the execution of arbitrary non-literal code.
Parameters
----------
source : str
The string to evaluate.
Returns
-------
obj : object
The result of evaluating `source`.
    Raises
    ------
    SyntaxError
        If the code has invalid Python syntax.
    ValueError
        If the code is syntactically valid but contains non-literal
        constructs (for example, function calls).
Examples
--------
>>> np.safe_eval('1')
1
>>> np.safe_eval('[1, 2, 3]')
[1, 2, 3]
>>> np.safe_eval('{"foo": ("bar", 10.0)}')
{'foo': ('bar', 10.0)}
>>> np.safe_eval('import os')
Traceback (most recent call last):
...
SyntaxError: invalid syntax
    >>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()')
    Traceback (most recent call last):
      ...
    ValueError: malformed node or string: <_ast.Call object at 0x...>
"""
# Local import to speed up numpy's import time.
import ast
return ast.literal_eval(source)
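def _safe_eval_demo():
    # Editor's sketch of the literal-only contract: literal expressions
    # round-trip, while syntactically valid but non-literal source is
    # refused by ast.literal_eval with a ValueError.
    assert safe_eval('[1, (2.0, "x")]') == [1, (2.0, 'x')]
    try:
        safe_eval('__import__("os").system("true")')
    except ValueError:
        pass  # non-literal constructs are rejected
    else:
        raise AssertionError('non-literal source was evaluated')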
#-----------------------------------------------------------------------------
|
brian-yang/mozillians
|
refs/heads/master
|
vendor-local/lib/python/unidecode/x0b1.py
|
253
|
data = (
'nyaess', # 0x00
'nyaeng', # 0x01
'nyaej', # 0x02
'nyaec', # 0x03
'nyaek', # 0x04
'nyaet', # 0x05
'nyaep', # 0x06
'nyaeh', # 0x07
'neo', # 0x08
'neog', # 0x09
'neogg', # 0x0a
'neogs', # 0x0b
'neon', # 0x0c
'neonj', # 0x0d
'neonh', # 0x0e
'neod', # 0x0f
'neol', # 0x10
'neolg', # 0x11
'neolm', # 0x12
'neolb', # 0x13
'neols', # 0x14
'neolt', # 0x15
'neolp', # 0x16
'neolh', # 0x17
'neom', # 0x18
'neob', # 0x19
'neobs', # 0x1a
'neos', # 0x1b
'neoss', # 0x1c
'neong', # 0x1d
'neoj', # 0x1e
'neoc', # 0x1f
'neok', # 0x20
'neot', # 0x21
'neop', # 0x22
'neoh', # 0x23
'ne', # 0x24
'neg', # 0x25
'negg', # 0x26
'negs', # 0x27
'nen', # 0x28
'nenj', # 0x29
'nenh', # 0x2a
'ned', # 0x2b
'nel', # 0x2c
'nelg', # 0x2d
'nelm', # 0x2e
'nelb', # 0x2f
'nels', # 0x30
'nelt', # 0x31
'nelp', # 0x32
'nelh', # 0x33
'nem', # 0x34
'neb', # 0x35
'nebs', # 0x36
'nes', # 0x37
'ness', # 0x38
'neng', # 0x39
'nej', # 0x3a
'nec', # 0x3b
'nek', # 0x3c
'net', # 0x3d
'nep', # 0x3e
'neh', # 0x3f
'nyeo', # 0x40
'nyeog', # 0x41
'nyeogg', # 0x42
'nyeogs', # 0x43
'nyeon', # 0x44
'nyeonj', # 0x45
'nyeonh', # 0x46
'nyeod', # 0x47
'nyeol', # 0x48
'nyeolg', # 0x49
'nyeolm', # 0x4a
'nyeolb', # 0x4b
'nyeols', # 0x4c
'nyeolt', # 0x4d
'nyeolp', # 0x4e
'nyeolh', # 0x4f
'nyeom', # 0x50
'nyeob', # 0x51
'nyeobs', # 0x52
'nyeos', # 0x53
'nyeoss', # 0x54
'nyeong', # 0x55
'nyeoj', # 0x56
'nyeoc', # 0x57
'nyeok', # 0x58
'nyeot', # 0x59
'nyeop', # 0x5a
'nyeoh', # 0x5b
'nye', # 0x5c
'nyeg', # 0x5d
'nyegg', # 0x5e
'nyegs', # 0x5f
'nyen', # 0x60
'nyenj', # 0x61
'nyenh', # 0x62
'nyed', # 0x63
'nyel', # 0x64
'nyelg', # 0x65
'nyelm', # 0x66
'nyelb', # 0x67
'nyels', # 0x68
'nyelt', # 0x69
'nyelp', # 0x6a
'nyelh', # 0x6b
'nyem', # 0x6c
'nyeb', # 0x6d
'nyebs', # 0x6e
'nyes', # 0x6f
'nyess', # 0x70
'nyeng', # 0x71
'nyej', # 0x72
'nyec', # 0x73
'nyek', # 0x74
'nyet', # 0x75
'nyep', # 0x76
'nyeh', # 0x77
'no', # 0x78
'nog', # 0x79
'nogg', # 0x7a
'nogs', # 0x7b
'non', # 0x7c
'nonj', # 0x7d
'nonh', # 0x7e
'nod', # 0x7f
'nol', # 0x80
'nolg', # 0x81
'nolm', # 0x82
'nolb', # 0x83
'nols', # 0x84
'nolt', # 0x85
'nolp', # 0x86
'nolh', # 0x87
'nom', # 0x88
'nob', # 0x89
'nobs', # 0x8a
'nos', # 0x8b
'noss', # 0x8c
'nong', # 0x8d
'noj', # 0x8e
'noc', # 0x8f
'nok', # 0x90
'not', # 0x91
'nop', # 0x92
'noh', # 0x93
'nwa', # 0x94
'nwag', # 0x95
'nwagg', # 0x96
'nwags', # 0x97
'nwan', # 0x98
'nwanj', # 0x99
'nwanh', # 0x9a
'nwad', # 0x9b
'nwal', # 0x9c
'nwalg', # 0x9d
'nwalm', # 0x9e
'nwalb', # 0x9f
'nwals', # 0xa0
'nwalt', # 0xa1
'nwalp', # 0xa2
'nwalh', # 0xa3
'nwam', # 0xa4
'nwab', # 0xa5
'nwabs', # 0xa6
'nwas', # 0xa7
'nwass', # 0xa8
'nwang', # 0xa9
'nwaj', # 0xaa
'nwac', # 0xab
'nwak', # 0xac
'nwat', # 0xad
'nwap', # 0xae
'nwah', # 0xaf
'nwae', # 0xb0
'nwaeg', # 0xb1
'nwaegg', # 0xb2
'nwaegs', # 0xb3
'nwaen', # 0xb4
'nwaenj', # 0xb5
'nwaenh', # 0xb6
'nwaed', # 0xb7
'nwael', # 0xb8
'nwaelg', # 0xb9
'nwaelm', # 0xba
'nwaelb', # 0xbb
'nwaels', # 0xbc
'nwaelt', # 0xbd
'nwaelp', # 0xbe
'nwaelh', # 0xbf
'nwaem', # 0xc0
'nwaeb', # 0xc1
'nwaebs', # 0xc2
'nwaes', # 0xc3
'nwaess', # 0xc4
'nwaeng', # 0xc5
'nwaej', # 0xc6
'nwaec', # 0xc7
'nwaek', # 0xc8
'nwaet', # 0xc9
'nwaep', # 0xca
'nwaeh', # 0xcb
'noe', # 0xcc
'noeg', # 0xcd
'noegg', # 0xce
'noegs', # 0xcf
'noen', # 0xd0
'noenj', # 0xd1
'noenh', # 0xd2
'noed', # 0xd3
'noel', # 0xd4
'noelg', # 0xd5
'noelm', # 0xd6
'noelb', # 0xd7
'noels', # 0xd8
'noelt', # 0xd9
'noelp', # 0xda
'noelh', # 0xdb
'noem', # 0xdc
'noeb', # 0xdd
'noebs', # 0xde
'noes', # 0xdf
'noess', # 0xe0
'noeng', # 0xe1
'noej', # 0xe2
'noec', # 0xe3
'noek', # 0xe4
'noet', # 0xe5
'noep', # 0xe6
'noeh', # 0xe7
'nyo', # 0xe8
'nyog', # 0xe9
'nyogg', # 0xea
'nyogs', # 0xeb
'nyon', # 0xec
'nyonj', # 0xed
'nyonh', # 0xee
'nyod', # 0xef
'nyol', # 0xf0
'nyolg', # 0xf1
'nyolm', # 0xf2
'nyolb', # 0xf3
'nyols', # 0xf4
'nyolt', # 0xf5
'nyolp', # 0xf6
'nyolh', # 0xf7
'nyom', # 0xf8
'nyob', # 0xf9
'nyobs', # 0xfa
'nyos', # 0xfb
'nyoss', # 0xfc
'nyong', # 0xfd
'nyoj', # 0xfe
'nyoc', # 0xff
)
|
VasilyNemkov/percona-xtrabackup
|
refs/heads/2.3
|
storage/innobase/xtrabackup/test/python/testtools/monkey.py
|
64
|
# Copyright (c) 2010 testtools developers. See LICENSE for details.
"""Helpers for monkey-patching Python code."""
__all__ = [
'MonkeyPatcher',
'patch',
]
class MonkeyPatcher(object):
"""A set of monkey-patches that can be applied and removed all together.
Use this to cover up attributes with new objects. Particularly useful for
testing difficult code.
"""
# Marker used to indicate that the patched attribute did not exist on the
# object before we patched it.
_NO_SUCH_ATTRIBUTE = object()
def __init__(self, *patches):
"""Construct a `MonkeyPatcher`.
:param patches: The patches to apply, each should be (obj, name,
new_value). Providing patches here is equivalent to calling
`add_patch`.
"""
        # List of patches to apply, in (obj, name, value) format.
self._patches_to_apply = []
# List of the original values for things that have been patched.
# (obj, name, value) format.
self._originals = []
for patch in patches:
self.add_patch(*patch)
def add_patch(self, obj, name, value):
"""Add a patch to overwrite 'name' on 'obj' with 'value'.
        The attribute C{name} on C{obj} will be set to C{value} when
C{patch} is called or during C{run_with_patches}.
You can restore the original values with a call to restore().
"""
self._patches_to_apply.append((obj, name, value))
def patch(self):
"""Apply all of the patches that have been specified with `add_patch`.
Reverse this operation using L{restore}.
"""
for obj, name, value in self._patches_to_apply:
original_value = getattr(obj, name, self._NO_SUCH_ATTRIBUTE)
self._originals.append((obj, name, original_value))
setattr(obj, name, value)
def restore(self):
"""Restore all original values to any patched objects.
If the patched attribute did not exist on an object before it was
patched, `restore` will delete the attribute so as to return the
object to its original state.
"""
while self._originals:
obj, name, value = self._originals.pop()
if value is self._NO_SUCH_ATTRIBUTE:
delattr(obj, name)
else:
setattr(obj, name, value)
def run_with_patches(self, f, *args, **kw):
"""Run 'f' with the given args and kwargs with all patches applied.
Restores all objects to their original state when finished.
"""
self.patch()
try:
return f(*args, **kw)
finally:
self.restore()
def patch(obj, attribute, value):
"""Set 'obj.attribute' to 'value' and return a callable to restore 'obj'.
If 'attribute' is not set on 'obj' already, then the returned callable
will delete the attribute when called.
:param obj: An object to monkey-patch.
:param attribute: The name of the attribute to patch.
:param value: The value to set 'obj.attribute' to.
:return: A nullary callable that, when run, will restore 'obj' to its
original state.
"""
patcher = MonkeyPatcher((obj, attribute, value))
patcher.patch()
return patcher.restore
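def _patch_demo():
    """Editor's sketch of the patch() contract, using a made-up Config class.
    Only patch() and MonkeyPatcher above are real; Config is hypothetical.
    """
    class Config(object):
        timeout = 5
    restore = patch(Config, 'timeout', 30)
    assert Config.timeout == 30
    restore()
    assert Config.timeout == 5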
|
goldeneye-source/ges-python
|
refs/heads/master
|
lib/email/mime/nonmultipart.py
|
335
|
# Copyright (C) 2002-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Base class for MIME type messages that are not multipart."""
__all__ = ['MIMENonMultipart']
from email import errors
from email.mime.base import MIMEBase
class MIMENonMultipart(MIMEBase):
"""Base class for MIME multipart/* type messages."""
def attach(self, payload):
# The public API prohibits attaching multiple subparts to MIMEBase
# derived subtypes since none of them are, by definition, of content
# type multipart/*
raise errors.MultipartConversionError(
'Cannot attach additional subparts to non-multipart/*')
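def _attach_rejection_demo():
    """Editor's sketch: MIMEText is a standard non-multipart subtype, so
    attaching a subpart to it must raise.  The import is deferred to call
    time to avoid a circular import while this module loads.
    """
    from email.mime.text import MIMEText
    msg = MIMEText('hello')
    try:
        msg.attach(MIMEText('world'))
    except errors.MultipartConversionError:
        return True
    raise AssertionError('attach() on a non-multipart type should raise')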
|
Oslandia/vizitown_plugin
|
refs/heads/master
|
twisted/web/microdom.py
|
40
|
# -*- test-case-name: twisted.web.test.test_xml -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Micro Document Object Model: a partial DOM implementation with SUX.
This is an implementation of what we consider to be the useful subset of the
DOM. The chief advantage of this library is that, not being burdened with
standards compliance, it can remain very stable between versions. We can also
implement utility 'pythonic' ways to access and mutate the XML tree.
Since this has not been subjected to a serious trial by fire, it is not
recommended for use outside of Twisted applications. However, it seems to work just
fine for the documentation generator, which parses a fairly representative
sample of XML.
Microdom mainly focuses on working with HTML and XHTML.
"""
# System Imports
import re
from cStringIO import StringIO
# create NodeList class
from types import ListType as NodeList
from types import StringTypes, UnicodeType
# Twisted Imports
from twisted.web.sux import XMLParser, ParseError
from twisted.python.util import InsensitiveDict
def getElementsByTagName(iNode, name):
"""
Return a list of all child elements of C{iNode} with a name matching
C{name}.
Note that this implementation does not conform to the DOM Level 1 Core
specification because it may return C{iNode}.
@param iNode: An element at which to begin searching. If C{iNode} has a
name matching C{name}, it will be included in the result.
@param name: A C{str} giving the name of the elements to return.
@return: A C{list} of direct or indirect child elements of C{iNode} with
the name C{name}. This may include C{iNode}.
"""
matches = []
matches_append = matches.append # faster lookup. don't do this at home
slice = [iNode]
    while len(slice) > 0:
c = slice.pop(0)
if c.nodeName == name:
matches_append(c)
slice[:0] = c.childNodes
return matches
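def _getElementsByTagNameDemo():
    """
    Editor's sketch of the non-conforming behaviour documented above: the
    start node itself may appear in the result.  Only L{Element}, defined
    later in this module, is used.
    """
    root = Element('div')
    root.appendChild(Element('div'))
    assert len(getElementsByTagName(root, 'div')) == 2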
def getElementsByTagNameNoCase(iNode, name):
    """
    Case-insensitive version of L{getElementsByTagName}.
    """
    name = name.lower()
    matches = []
    matches_append = matches.append
    slice = [iNode]
    while len(slice) > 0:
c = slice.pop(0)
if c.nodeName.lower() == name:
matches_append(c)
slice[:0] = c.childNodes
return matches
# order is important
HTML_ESCAPE_CHARS = (('&', '&amp;'), # don't add any entities before this one
                     ('<', '&lt;'),
                     ('>', '&gt;'),
                     ('"', '&quot;'))
REV_HTML_ESCAPE_CHARS = list(HTML_ESCAPE_CHARS)
REV_HTML_ESCAPE_CHARS.reverse()
XML_ESCAPE_CHARS = HTML_ESCAPE_CHARS + (("'", '&apos;'),)
REV_XML_ESCAPE_CHARS = list(XML_ESCAPE_CHARS)
REV_XML_ESCAPE_CHARS.reverse()
def unescape(text, chars=REV_HTML_ESCAPE_CHARS):
"Perform the exact opposite of 'escape'."
for s, h in chars:
text = text.replace(h, s)
return text
def escape(text, chars=HTML_ESCAPE_CHARS):
"Escape a few XML special chars with XML entities."
for s, h in chars:
text = text.replace(s, h)
return text
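def _escapeRoundTripDemo():
    """
    Editor's sketch: '&' is substituted first so the entities produced for
    the other characters are not themselves re-escaped, and unescape()
    restores the original text.
    """
    text = 'a < b & "c"'
    assert escape(text) == 'a &lt; b &amp; &quot;c&quot;'
    assert unescape(escape(text)) == text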
class MismatchedTags(Exception):
def __init__(self, filename, expect, got, endLine, endCol, begLine, begCol):
(self.filename, self.expect, self.got, self.begLine, self.begCol, self.endLine,
self.endCol) = filename, expect, got, begLine, begCol, endLine, endCol
def __str__(self):
return ("expected </%s>, got </%s> line: %s col: %s, began line: %s col: %s"
% (self.expect, self.got, self.endLine, self.endCol, self.begLine,
self.begCol))
class Node(object):
nodeName = "Node"
def __init__(self, parentNode=None):
self.parentNode = parentNode
self.childNodes = []
def isEqualToNode(self, other):
"""
Compare this node to C{other}. If the nodes have the same number of
children and corresponding children are equal to each other, return
C{True}, otherwise return C{False}.
@type other: L{Node}
@rtype: C{bool}
"""
if len(self.childNodes) != len(other.childNodes):
return False
for a, b in zip(self.childNodes, other.childNodes):
if not a.isEqualToNode(b):
return False
return True
def writexml(self, stream, indent='', addindent='', newl='', strip=0,
nsprefixes={}, namespace=''):
raise NotImplementedError()
def toxml(self, indent='', addindent='', newl='', strip=0, nsprefixes={},
namespace=''):
s = StringIO()
self.writexml(s, indent, addindent, newl, strip, nsprefixes, namespace)
rv = s.getvalue()
return rv
def writeprettyxml(self, stream, indent='', addindent=' ', newl='\n', strip=0):
return self.writexml(stream, indent, addindent, newl, strip)
def toprettyxml(self, indent='', addindent=' ', newl='\n', strip=0):
return self.toxml(indent, addindent, newl, strip)
def cloneNode(self, deep=0, parent=None):
raise NotImplementedError()
def hasChildNodes(self):
if self.childNodes:
return 1
else:
return 0
def appendChild(self, child):
"""
Make the given L{Node} the last child of this node.
@param child: The L{Node} which will become a child of this node.
@raise TypeError: If C{child} is not a C{Node} instance.
"""
if not isinstance(child, Node):
raise TypeError("expected Node instance")
self.childNodes.append(child)
child.parentNode = self
def insertBefore(self, new, ref):
"""
Make the given L{Node} C{new} a child of this node which comes before
the L{Node} C{ref}.
@param new: A L{Node} which will become a child of this node.
@param ref: A L{Node} which is already a child of this node which
C{new} will be inserted before.
@raise TypeError: If C{new} or C{ref} is not a C{Node} instance.
@return: C{new}
"""
if not isinstance(new, Node) or not isinstance(ref, Node):
raise TypeError("expected Node instance")
i = self.childNodes.index(ref)
new.parentNode = self
self.childNodes.insert(i, new)
return new
def removeChild(self, child):
"""
Remove the given L{Node} from this node's children.
@param child: A L{Node} which is a child of this node which will no
longer be a child of this node after this method is called.
@raise TypeError: If C{child} is not a C{Node} instance.
@return: C{child}
"""
if not isinstance(child, Node):
raise TypeError("expected Node instance")
if child in self.childNodes:
self.childNodes.remove(child)
child.parentNode = None
return child
def replaceChild(self, newChild, oldChild):
"""
Replace a L{Node} which is already a child of this node with a
different node.
@param newChild: A L{Node} which will be made a child of this node.
@param oldChild: A L{Node} which is a child of this node which will
give up its position to C{newChild}.
@raise TypeError: If C{newChild} or C{oldChild} is not a C{Node}
instance.
@raise ValueError: If C{oldChild} is not a child of this C{Node}.
"""
if not isinstance(newChild, Node) or not isinstance(oldChild, Node):
raise TypeError("expected Node instance")
if oldChild.parentNode is not self:
raise ValueError("oldChild is not a child of this node")
self.childNodes[self.childNodes.index(oldChild)] = newChild
oldChild.parentNode = None
newChild.parentNode = self
def lastChild(self):
return self.childNodes[-1]
def firstChild(self):
if len(self.childNodes):
return self.childNodes[0]
return None
#def get_ownerDocument(self):
# """This doesn't really get the owner document; microdom nodes
# don't even have one necessarily. This gets the root node,
# which is usually what you really meant.
# *NOT DOM COMPLIANT.*
# """
# node=self
# while (node.parentNode): node=node.parentNode
# return node
#ownerDocument=node.get_ownerDocument()
# leaving commented for discussion; see also domhelpers.getParents(node)
class Document(Node):
def __init__(self, documentElement=None):
Node.__init__(self)
if documentElement:
self.appendChild(documentElement)
def cloneNode(self, deep=0, parent=None):
d = Document()
d.doctype = self.doctype
if deep:
newEl = self.documentElement.cloneNode(1, self)
else:
newEl = self.documentElement
d.appendChild(newEl)
return d
doctype = None
def isEqualToDocument(self, n):
return (self.doctype == n.doctype) and Node.isEqualToNode(self, n)
isEqualToNode = isEqualToDocument
def get_documentElement(self):
return self.childNodes[0]
documentElement=property(get_documentElement)
def appendChild(self, child):
"""
Make the given L{Node} the I{document element} of this L{Document}.
@param child: The L{Node} to make into this L{Document}'s document
element.
@raise ValueError: If this document already has a document element.
"""
if self.childNodes:
raise ValueError("Only one element per document.")
Node.appendChild(self, child)
def writexml(self, stream, indent='', addindent='', newl='', strip=0,
nsprefixes={}, namespace=''):
stream.write('<?xml version="1.0"?>' + newl)
if self.doctype:
stream.write("<!DOCTYPE "+self.doctype+">" + newl)
self.documentElement.writexml(stream, indent, addindent, newl, strip,
nsprefixes, namespace)
# of dubious utility (?)
def createElement(self, name, **kw):
return Element(name, **kw)
def createTextNode(self, text):
return Text(text)
def createComment(self, text):
return Comment(text)
def getElementsByTagName(self, name):
if self.documentElement.caseInsensitive:
return getElementsByTagNameNoCase(self, name)
return getElementsByTagName(self, name)
def getElementById(self, id):
childNodes = self.childNodes[:]
while childNodes:
node = childNodes.pop(0)
if node.childNodes:
childNodes.extend(node.childNodes)
if hasattr(node, 'getAttribute') and node.getAttribute("id") == id:
return node
class EntityReference(Node):
def __init__(self, eref, parentNode=None):
Node.__init__(self, parentNode)
self.eref = eref
self.nodeValue = self.data = "&" + eref + ";"
def isEqualToEntityReference(self, n):
if not isinstance(n, EntityReference):
return 0
return (self.eref == n.eref) and (self.nodeValue == n.nodeValue)
isEqualToNode = isEqualToEntityReference
def writexml(self, stream, indent='', addindent='', newl='', strip=0,
nsprefixes={}, namespace=''):
stream.write(self.nodeValue)
def cloneNode(self, deep=0, parent=None):
return EntityReference(self.eref, parent)
class CharacterData(Node):
def __init__(self, data, parentNode=None):
Node.__init__(self, parentNode)
self.value = self.data = self.nodeValue = data
def isEqualToCharacterData(self, n):
return self.value == n.value
isEqualToNode = isEqualToCharacterData
class Comment(CharacterData):
"""A comment node."""
def writexml(self, stream, indent='', addindent='', newl='', strip=0,
nsprefixes={}, namespace=''):
val=self.data
if isinstance(val, UnicodeType):
val=val.encode('utf8')
stream.write("<!--%s-->" % val)
def cloneNode(self, deep=0, parent=None):
return Comment(self.nodeValue, parent)
class Text(CharacterData):
def __init__(self, data, parentNode=None, raw=0):
CharacterData.__init__(self, data, parentNode)
self.raw = raw
def isEqualToNode(self, other):
"""
Compare this text to C{text}. If the underlying values and the C{raw}
flag are the same, return C{True}, otherwise return C{False}.
"""
return (
CharacterData.isEqualToNode(self, other) and
self.raw == other.raw)
def cloneNode(self, deep=0, parent=None):
return Text(self.nodeValue, parent, self.raw)
def writexml(self, stream, indent='', addindent='', newl='', strip=0,
nsprefixes={}, namespace=''):
if self.raw:
val = self.nodeValue
if not isinstance(val, StringTypes):
val = str(self.nodeValue)
else:
v = self.nodeValue
if not isinstance(v, StringTypes):
v = str(v)
if strip:
v = ' '.join(v.split())
val = escape(v)
if isinstance(val, UnicodeType):
val = val.encode('utf8')
stream.write(val)
def __repr__(self):
return "Text(%s" % repr(self.nodeValue) + ')'
class CDATASection(CharacterData):
def cloneNode(self, deep=0, parent=None):
return CDATASection(self.nodeValue, parent)
def writexml(self, stream, indent='', addindent='', newl='', strip=0,
nsprefixes={}, namespace=''):
stream.write("<![CDATA[")
stream.write(self.nodeValue)
stream.write("]]>")
def _genprefix():
i = 0
while True:
yield 'p' + str(i)
i = i + 1
genprefix = _genprefix().next
class _Attr(CharacterData):
"Support class for getAttributeNode."
class Element(Node):
preserveCase = 0
caseInsensitive = 1
nsprefixes = None
def __init__(self, tagName, attributes=None, parentNode=None,
filename=None, markpos=None,
caseInsensitive=1, preserveCase=0,
namespace=None):
Node.__init__(self, parentNode)
self.preserveCase = preserveCase or not caseInsensitive
self.caseInsensitive = caseInsensitive
if not preserveCase:
tagName = tagName.lower()
if attributes is None:
self.attributes = {}
else:
self.attributes = attributes
for k, v in self.attributes.items():
self.attributes[k] = unescape(v)
if caseInsensitive:
self.attributes = InsensitiveDict(self.attributes,
preserve=preserveCase)
self.endTagName = self.nodeName = self.tagName = tagName
self._filename = filename
self._markpos = markpos
self.namespace = namespace
def addPrefixes(self, pfxs):
if self.nsprefixes is None:
self.nsprefixes = pfxs
else:
self.nsprefixes.update(pfxs)
def endTag(self, endTagName):
if not self.preserveCase:
endTagName = endTagName.lower()
self.endTagName = endTagName
def isEqualToElement(self, n):
if self.caseInsensitive:
return ((self.attributes == n.attributes)
and (self.nodeName.lower() == n.nodeName.lower()))
return (self.attributes == n.attributes) and (self.nodeName == n.nodeName)
def isEqualToNode(self, other):
"""
Compare this element to C{other}. If the C{nodeName}, C{namespace},
C{attributes}, and C{childNodes} are all the same, return C{True},
otherwise return C{False}.
"""
return (
self.nodeName.lower() == other.nodeName.lower() and
self.namespace == other.namespace and
self.attributes == other.attributes and
Node.isEqualToNode(self, other))
def cloneNode(self, deep=0, parent=None):
clone = Element(
self.tagName, parentNode=parent, namespace=self.namespace,
preserveCase=self.preserveCase, caseInsensitive=self.caseInsensitive)
clone.attributes.update(self.attributes)
if deep:
clone.childNodes = [child.cloneNode(1, clone) for child in self.childNodes]
else:
clone.childNodes = []
return clone
def getElementsByTagName(self, name):
if self.caseInsensitive:
return getElementsByTagNameNoCase(self, name)
return getElementsByTagName(self, name)
def hasAttributes(self):
return 1
def getAttribute(self, name, default=None):
return self.attributes.get(name, default)
def getAttributeNS(self, ns, name, default=None):
nsk = (ns, name)
if self.attributes.has_key(nsk):
return self.attributes[nsk]
if ns == self.namespace:
return self.attributes.get(name, default)
return default
def getAttributeNode(self, name):
return _Attr(self.getAttribute(name), self)
def setAttribute(self, name, attr):
self.attributes[name] = attr
def removeAttribute(self, name):
if name in self.attributes:
del self.attributes[name]
def hasAttribute(self, name):
return name in self.attributes
def writexml(self, stream, indent='', addindent='', newl='', strip=0,
nsprefixes={}, namespace=''):
"""
Serialize this L{Element} to the given stream.
@param stream: A file-like object to which this L{Element} will be
written.
@param nsprefixes: A C{dict} mapping namespace URIs as C{str} to
prefixes as C{str}. This defines the prefixes which are already in
scope in the document at the point at which this L{Element} exists.
This is essentially an implementation detail for namespace support.
Applications should not try to use it.
@param namespace: The namespace URI as a C{str} which is the default at
the point in the document at which this L{Element} exists. This is
essentially an implementation detail for namespace support.
Applications should not try to use it.
"""
# write beginning
ALLOWSINGLETON = ('img', 'br', 'hr', 'base', 'meta', 'link', 'param',
'area', 'input', 'col', 'basefont', 'isindex',
'frame')
BLOCKELEMENTS = ('html', 'head', 'body', 'noscript', 'ins', 'del',
'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'script',
'ul', 'ol', 'dl', 'pre', 'hr', 'blockquote',
'address', 'p', 'div', 'fieldset', 'table', 'tr',
'form', 'object', 'fieldset', 'applet', 'map')
FORMATNICELY = ('tr', 'ul', 'ol', 'head')
# this should never be necessary unless people start
# changing .tagName on the fly(?)
if not self.preserveCase:
self.endTagName = self.tagName
w = stream.write
if self.nsprefixes:
newprefixes = self.nsprefixes.copy()
for ns in nsprefixes.keys():
if ns in newprefixes:
del newprefixes[ns]
else:
newprefixes = {}
begin = ['<']
if self.tagName in BLOCKELEMENTS:
begin = [newl, indent] + begin
bext = begin.extend
writeattr = lambda _atr, _val: bext((' ', _atr, '="', escape(_val), '"'))
# Make a local for tracking what end tag will be used. If namespace
# prefixes are involved, this will be changed to account for that
# before it's actually used.
endTagName = self.endTagName
if namespace != self.namespace and self.namespace is not None:
# If the current default namespace is not the namespace of this tag
# (and this tag has a namespace at all) then we'll write out
# something related to namespaces.
if self.namespace in nsprefixes:
# This tag's namespace already has a prefix bound to it. Use
# that prefix.
prefix = nsprefixes[self.namespace]
bext(prefix + ':' + self.tagName)
# Also make sure we use it for the end tag.
endTagName = prefix + ':' + self.endTagName
else:
# This tag's namespace has no prefix bound to it. Change the
# default namespace to this tag's namespace so we don't need
# prefixes. Alternatively, we could add a new prefix binding.
# I'm not sure why the code was written one way rather than the
# other. -exarkun
bext(self.tagName)
writeattr("xmlns", self.namespace)
# The default namespace just changed. Make sure any children
# know about this.
namespace = self.namespace
else:
# This tag has no namespace or its namespace is already the default
# namespace. Nothing extra to do here.
bext(self.tagName)
j = ''.join
for attr, val in self.attributes.iteritems():
if isinstance(attr, tuple):
ns, key = attr
if nsprefixes.has_key(ns):
prefix = nsprefixes[ns]
else:
prefix = genprefix()
newprefixes[ns] = prefix
assert val is not None
writeattr(prefix+':'+key,val)
else:
assert val is not None
writeattr(attr, val)
if newprefixes:
for ns, prefix in newprefixes.iteritems():
if prefix:
writeattr('xmlns:'+prefix, ns)
newprefixes.update(nsprefixes)
downprefixes = newprefixes
else:
downprefixes = nsprefixes
w(j(begin))
if self.childNodes:
w(">")
newindent = indent + addindent
for child in self.childNodes:
if self.tagName in BLOCKELEMENTS and \
self.tagName in FORMATNICELY:
w(j((newl, newindent)))
child.writexml(stream, newindent, addindent, newl, strip,
downprefixes, namespace)
if self.tagName in BLOCKELEMENTS:
w(j((newl, indent)))
w(j(('</', endTagName, '>')))
elif self.tagName.lower() not in ALLOWSINGLETON:
w(j(('></', endTagName, '>')))
else:
w(" />")
def __repr__(self):
rep = "Element(%s" % repr(self.nodeName)
if self.attributes:
rep += ", attributes=%r" % (self.attributes,)
if self._filename:
rep += ", filename=%r" % (self._filename,)
if self._markpos:
rep += ", markpos=%r" % (self._markpos,)
return rep + ')'
def __str__(self):
rep = "<" + self.nodeName
if self._filename or self._markpos:
rep += " ("
if self._filename:
rep += repr(self._filename)
if self._markpos:
rep += " line %s column %s" % self._markpos
if self._filename or self._markpos:
rep += ")"
for item in self.attributes.items():
rep += " %s=%r" % item
if self.hasChildNodes():
rep += " >...</%s>" % self.nodeName
else:
rep += " />"
return rep
def _unescapeDict(d):
dd = {}
for k, v in d.items():
dd[k] = unescape(v)
return dd
def _reverseDict(d):
dd = {}
for k, v in d.items():
dd[v]=k
return dd
class MicroDOMParser(XMLParser):
# <dash> glyph: a quick scan thru the DTD says BODY, AREA, LINK, IMG, HR,
# P, DT, DD, LI, INPUT, OPTION, THEAD, TFOOT, TBODY, COLGROUP, COL, TR, TH,
# TD, HEAD, BASE, META, HTML all have optional closing tags
soonClosers = 'area link br img hr input base meta'.split()
laterClosers = {'p': ['p', 'dt'],
'dt': ['dt','dd'],
'dd': ['dt', 'dd'],
'li': ['li'],
'tbody': ['thead', 'tfoot', 'tbody'],
'thead': ['thead', 'tfoot', 'tbody'],
'tfoot': ['thead', 'tfoot', 'tbody'],
'colgroup': ['colgroup'],
'col': ['col'],
'tr': ['tr'],
'td': ['td'],
'th': ['th'],
'head': ['body'],
'title': ['head', 'body'], # this looks wrong...
'option': ['option'],
}
def __init__(self, beExtremelyLenient=0, caseInsensitive=1, preserveCase=0,
soonClosers=soonClosers, laterClosers=laterClosers):
self.elementstack = []
d = {'xmlns': 'xmlns', '': None}
dr = _reverseDict(d)
self.nsstack = [(d,None,dr)]
self.documents = []
self._mddoctype = None
self.beExtremelyLenient = beExtremelyLenient
self.caseInsensitive = caseInsensitive
self.preserveCase = preserveCase or not caseInsensitive
self.soonClosers = soonClosers
self.laterClosers = laterClosers
# self.indentlevel = 0
def shouldPreserveSpace(self):
for edx in xrange(len(self.elementstack)):
el = self.elementstack[-edx]
if el.tagName == 'pre' or el.getAttribute("xml:space", '') == 'preserve':
return 1
return 0
def _getparent(self):
if self.elementstack:
return self.elementstack[-1]
else:
return None
COMMENT = re.compile(r"\s*/[/*]\s*")
def _fixScriptElement(self, el):
# this deals with case where there is comment or CDATA inside
# <script> tag and we want to do the right thing with it
if not self.beExtremelyLenient or not len(el.childNodes) == 1:
return
c = el.firstChild()
if isinstance(c, Text):
# deal with nasty people who do stuff like:
# <script> // <!--
# x = 1;
# // --></script>
# tidy does this, for example.
prefix = ""
oldvalue = c.value
match = self.COMMENT.match(oldvalue)
if match:
prefix = match.group()
oldvalue = oldvalue[len(prefix):]
# now see if contents are actual node and comment or CDATA
try:
e = parseString("<a>%s</a>" % oldvalue).childNodes[0]
except (ParseError, MismatchedTags):
return
if len(e.childNodes) != 1:
return
e = e.firstChild()
if isinstance(e, (CDATASection, Comment)):
el.childNodes = []
if prefix:
el.childNodes.append(Text(prefix))
el.childNodes.append(e)
def gotDoctype(self, doctype):
self._mddoctype = doctype
def gotTagStart(self, name, attributes):
# print ' '*self.indentlevel, 'start tag',name
# self.indentlevel += 1
parent = self._getparent()
if (self.beExtremelyLenient and isinstance(parent, Element)):
parentName = parent.tagName
myName = name
if self.caseInsensitive:
parentName = parentName.lower()
myName = myName.lower()
if myName in self.laterClosers.get(parentName, []):
self.gotTagEnd(parent.tagName)
parent = self._getparent()
attributes = _unescapeDict(attributes)
namespaces = self.nsstack[-1][0]
newspaces = {}
for k, v in attributes.items():
if k.startswith('xmlns'):
spacenames = k.split(':',1)
if len(spacenames) == 2:
newspaces[spacenames[1]] = v
else:
newspaces[''] = v
del attributes[k]
if newspaces:
namespaces = namespaces.copy()
namespaces.update(newspaces)
for k, v in attributes.items():
ksplit = k.split(':', 1)
if len(ksplit) == 2:
pfx, tv = ksplit
if pfx != 'xml' and pfx in namespaces:
attributes[namespaces[pfx], tv] = v
del attributes[k]
el = Element(name, attributes, parent,
self.filename, self.saveMark(),
caseInsensitive=self.caseInsensitive,
preserveCase=self.preserveCase,
namespace=namespaces.get(''))
revspaces = _reverseDict(newspaces)
el.addPrefixes(revspaces)
if newspaces:
rscopy = self.nsstack[-1][2].copy()
rscopy.update(revspaces)
self.nsstack.append((namespaces, el, rscopy))
self.elementstack.append(el)
if parent:
parent.appendChild(el)
if (self.beExtremelyLenient and el.tagName in self.soonClosers):
self.gotTagEnd(name)
def _gotStandalone(self, factory, data):
parent = self._getparent()
te = factory(data, parent)
if parent:
parent.appendChild(te)
elif self.beExtremelyLenient:
self.documents.append(te)
def gotText(self, data):
if data.strip() or self.shouldPreserveSpace():
self._gotStandalone(Text, data)
def gotComment(self, data):
self._gotStandalone(Comment, data)
def gotEntityReference(self, entityRef):
self._gotStandalone(EntityReference, entityRef)
def gotCData(self, cdata):
self._gotStandalone(CDATASection, cdata)
def gotTagEnd(self, name):
# print ' '*self.indentlevel, 'end tag',name
# self.indentlevel -= 1
if not self.elementstack:
if self.beExtremelyLenient:
return
raise MismatchedTags(*((self.filename, "NOTHING", name)
+self.saveMark()+(0,0)))
el = self.elementstack.pop()
pfxdix = self.nsstack[-1][2]
if self.nsstack[-1][1] is el:
nstuple = self.nsstack.pop()
else:
nstuple = None
if self.caseInsensitive:
tn = el.tagName.lower()
cname = name.lower()
else:
tn = el.tagName
cname = name
nsplit = name.split(':',1)
if len(nsplit) == 2:
pfx, newname = nsplit
ns = pfxdix.get(pfx,None)
if ns is not None:
if el.namespace != ns:
if not self.beExtremelyLenient:
raise MismatchedTags(*((self.filename, el.tagName, name)
+self.saveMark()+el._markpos))
if not (tn == cname):
if self.beExtremelyLenient:
if self.elementstack:
lastEl = self.elementstack[0]
for idx in xrange(len(self.elementstack)):
if self.elementstack[-(idx+1)].tagName == cname:
self.elementstack[-(idx+1)].endTag(name)
break
else:
# this was a garbage close tag; wait for a real one
self.elementstack.append(el)
if nstuple is not None:
self.nsstack.append(nstuple)
return
del self.elementstack[-(idx+1):]
if not self.elementstack:
self.documents.append(lastEl)
return
else:
raise MismatchedTags(*((self.filename, el.tagName, name)
+self.saveMark()+el._markpos))
el.endTag(name)
if not self.elementstack:
self.documents.append(el)
if self.beExtremelyLenient and el.tagName == "script":
self._fixScriptElement(el)
def connectionLost(self, reason):
XMLParser.connectionLost(self, reason) # This can cause more events!
if self.elementstack:
if self.beExtremelyLenient:
self.documents.append(self.elementstack[0])
else:
raise MismatchedTags(*((self.filename, self.elementstack[-1],
"END_OF_FILE")
+self.saveMark()
+self.elementstack[-1]._markpos))
def parse(readable, *args, **kwargs):
"""Parse HTML or XML readable."""
if not hasattr(readable, "read"):
readable = open(readable, "rb")
mdp = MicroDOMParser(*args, **kwargs)
mdp.filename = getattr(readable, "name", "<xmlfile />")
mdp.makeConnection(None)
if hasattr(readable,"getvalue"):
mdp.dataReceived(readable.getvalue())
else:
r = readable.read(1024)
while r:
mdp.dataReceived(r)
r = readable.read(1024)
mdp.connectionLost(None)
if not mdp.documents:
raise ParseError(mdp.filename, 0, 0, "No top-level Nodes in document")
if mdp.beExtremelyLenient:
if len(mdp.documents) == 1:
d = mdp.documents[0]
if not isinstance(d, Element):
el = Element("html")
el.appendChild(d)
d = el
else:
d = Element("html")
for child in mdp.documents:
d.appendChild(child)
else:
d = mdp.documents[0]
doc = Document(d)
doc.doctype = mdp._mddoctype
return doc
def parseString(st, *args, **kw):
if isinstance(st, UnicodeType):
# this isn't particularly ideal, but it does work.
return parse(StringIO(st.encode('UTF-16')), *args, **kw)
return parse(StringIO(st), *args, **kw)
def parseXML(readable):
"""Parse an XML readable object."""
return parse(readable, caseInsensitive=0, preserveCase=1)
def parseXMLString(st):
"""Parse an XML readable object."""
return parseString(st, caseInsensitive=0, preserveCase=1)
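def _lenientParseDemo():
    """
    Editor's sketch: with beExtremelyLenient the parser repairs markup that
    relies on optional close tags, using the laterClosers table defined above.
    """
    doc = parseString('<ul><li>one<li>two</ul>', beExtremelyLenient=1)
    assert len(doc.getElementsByTagName('li')) == 2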
# Utility
class lmx:
"""Easy creation of XML."""
def __init__(self, node='div'):
if isinstance(node, StringTypes):
node = Element(node)
self.node = node
def __getattr__(self, name):
if name[0] == '_':
raise AttributeError("no private attrs")
return lambda **kw: self.add(name,**kw)
def __setitem__(self, key, val):
self.node.setAttribute(key, val)
def __getitem__(self, key):
return self.node.getAttribute(key)
def text(self, txt, raw=0):
nn = Text(txt, raw=raw)
self.node.appendChild(nn)
return self
def add(self, tagName, **kw):
newNode = Element(tagName, caseInsensitive=0, preserveCase=0)
self.node.appendChild(newNode)
xf = lmx(newNode)
for k, v in kw.items():
if k[0] == '_':
k = k[1:]
xf[k]=v
return xf
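def _lmxDemo():
    """
    Editor's sketch of the lmx helper: calls chain, and a leading underscore
    on a keyword argument is stripped so reserved words like 'class' can be
    used as attribute names.
    """
    markup = lmx()
    markup.add('span', _class='greeting').text('hi')
    assert markup.node.toxml() == '<div><span class="greeting">hi</span></div>'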
|
clconway/shipshape
|
refs/heads/master
|
third_party/node/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/easy_xml_test.py
|
2698
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the easy_xml.py file. """
import gyp.easy_xml as easy_xml
import unittest
import StringIO
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def test_EasyXml_simple(self):
self.assertEqual(
easy_xml.XmlToString(['test']),
'<?xml version="1.0" encoding="utf-8"?><test/>')
self.assertEqual(
easy_xml.XmlToString(['test'], encoding='Windows-1252'),
'<?xml version="1.0" encoding="Windows-1252"?><test/>')
def test_EasyXml_simple_with_attributes(self):
self.assertEqual(
easy_xml.XmlToString(['test2', {'a': 'value1', 'b': 'value2'}]),
'<?xml version="1.0" encoding="utf-8"?><test2 a="value1" b="value2"/>')
def test_EasyXml_escaping(self):
original = '<test>\'"\r&\nfoo'
    converted = '&lt;test&gt;\'"&#xD;&amp;&#xA;foo'
    converted_apos = converted.replace("'", '&apos;')
self.assertEqual(
easy_xml.XmlToString(['test3', {'a': original}, original]),
'<?xml version="1.0" encoding="utf-8"?><test3 a="%s">%s</test3>' %
(converted, converted_apos))
def test_EasyXml_pretty(self):
self.assertEqual(
easy_xml.XmlToString(
['test3',
['GrandParent',
['Parent1',
['Child']
],
['Parent2']
]
],
pretty=True),
'<?xml version="1.0" encoding="utf-8"?>\n'
'<test3>\n'
' <GrandParent>\n'
' <Parent1>\n'
' <Child/>\n'
' </Parent1>\n'
' <Parent2/>\n'
' </GrandParent>\n'
'</test3>\n')
def test_EasyXml_complex(self):
# We want to create:
target = (
'<?xml version="1.0" encoding="utf-8"?>'
'<Project>'
'<PropertyGroup Label="Globals">'
'<ProjectGuid>{D2250C20-3A94-4FB9-AF73-11BC5B73884B}</ProjectGuid>'
'<Keyword>Win32Proj</Keyword>'
'<RootNamespace>automated_ui_tests</RootNamespace>'
'</PropertyGroup>'
'<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props"/>'
'<PropertyGroup '
'Condition="\'$(Configuration)|$(Platform)\'=='
'\'Debug|Win32\'" Label="Configuration">'
'<ConfigurationType>Application</ConfigurationType>'
'<CharacterSet>Unicode</CharacterSet>'
'</PropertyGroup>'
'</Project>')
xml = easy_xml.XmlToString(
['Project',
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', '{D2250C20-3A94-4FB9-AF73-11BC5B73884B}'],
['Keyword', 'Win32Proj'],
['RootNamespace', 'automated_ui_tests']
],
['Import', {'Project': '$(VCTargetsPath)\\Microsoft.Cpp.props'}],
['PropertyGroup',
{'Condition': "'$(Configuration)|$(Platform)'=='Debug|Win32'",
'Label': 'Configuration'},
['ConfigurationType', 'Application'],
['CharacterSet', 'Unicode']
]
])
self.assertEqual(xml, target)
if __name__ == '__main__':
unittest.main()
|
BurtBiel/azure-cli
|
refs/heads/master
|
src/command_modules/azure-cli-network/azure/cli/command_modules/network/mgmt_app_gateway/lib/models/template_link.py
|
1
|
#---------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#---------------------------------------------------------------------------------------------
#pylint: skip-file
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TemplateLink(Model):
"""
Entity representing the reference to the template.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar uri: URI referencing the template. Default value:
"https://azuresdkci.blob.core.windows.net/templatehost/CreateAppGateway_2016-07-19/azuredeploy.json"
.
:vartype uri: str
:param content_version: If included it must match the ContentVersion in
the template.
:type content_version: str
"""
_validation = {
'uri': {'required': True, 'constant': True},
}
_attribute_map = {
'uri': {'key': 'uri', 'type': 'str'},
'content_version': {'key': 'contentVersion', 'type': 'str'},
}
uri = "https://azuresdkci.blob.core.windows.net/templatehost/CreateAppGateway_2016-07-19/azuredeploy.json"
def __init__(self, content_version=None):
self.content_version = content_version
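def _template_link_demo():
    # Editor's sketch: 'uri' is a class-level constant enforced by the
    # msrest validation table above; only content_version varies.
    link = TemplateLink(content_version='1.0.0.0')
    assert link.content_version == '1.0.0.0'
    assert TemplateLink.uri.endswith('azuredeploy.json')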
|
jnayak1/osf.io
|
refs/heads/develop
|
api/wikis/serializers.py
|
6
|
import sys
from rest_framework import serializers as ser
from api.base.serializers import JSONAPISerializer, IDField, TypeField, Link, LinksField, RelationshipField
from api.base.utils import absolute_reverse
class WikiSerializer(JSONAPISerializer):
filterable_fields = frozenset([
'name',
'date_modified'
])
id = IDField(source='_id', read_only=True)
type = TypeField()
name = ser.CharField(source='page_name')
kind = ser.SerializerMethodField()
size = ser.SerializerMethodField()
path = ser.SerializerMethodField()
materialized_path = ser.SerializerMethodField(method_name='get_path')
date_modified = ser.DateTimeField(source='date')
content_type = ser.SerializerMethodField()
extra = ser.SerializerMethodField(help_text='Additional metadata about this wiki')
user = RelationshipField(
related_view='users:user-detail',
related_view_kwargs={'user_id': '<user._id>'}
)
node = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<node._id>'}
)
comments = RelationshipField(
related_view='nodes:node-comments',
related_view_kwargs={'node_id': '<node._id>'},
related_meta={'unread': 'get_unread_comments_count'},
filter={'target': '<pk>'}
)
# LinksField.to_representation adds link to "self"
links = LinksField({
'info': Link('wikis:wiki-detail', kwargs={'wiki_id': '<_id>'}),
'download': 'get_wiki_content'
})
class Meta:
type_ = 'wikis'
def get_absolute_url(self, obj):
return obj.get_absolute_url()
def get_path(self, obj):
return '/{}'.format(obj)
def get_kind(self, obj):
return 'file'
def get_size(self, obj):
return sys.getsizeof(obj.content)
def get_content_type(self, obj):
return 'text/markdown'
def get_extra(self, obj):
return {
'version': obj.version
}
def get_wiki_content(self, obj):
return absolute_reverse('wikis:wiki-content', kwargs={
'wiki_id': obj._id,
})
class WikiDetailSerializer(WikiSerializer):
"""
Overrides Wiki Serializer to make id required.
"""
id = IDField(source='_id', required=True)
|
jsoref/django
|
refs/heads/master
|
django/db/backends/oracle/schema.py
|
404
|
import binascii
import copy
import datetime
import re
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.utils import DatabaseError
from django.utils import six
from django.utils.text import force_text
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_create_column = "ALTER TABLE %(table)s ADD %(column)s %(definition)s"
sql_alter_column_type = "MODIFY %(column)s %(type)s"
sql_alter_column_null = "MODIFY %(column)s NULL"
sql_alter_column_not_null = "MODIFY %(column)s NOT NULL"
sql_alter_column_default = "MODIFY %(column)s DEFAULT %(default)s"
sql_alter_column_no_default = "MODIFY %(column)s DEFAULT NULL"
sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s"
sql_delete_table = "DROP TABLE %(table)s CASCADE CONSTRAINTS"
def quote_value(self, value):
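        # Illustrative outputs (editor's sketch): embedded quotes in strings
        # are doubled and booleans map to Oracle's 1/0, e.g.
        #   quote_value("O'Reilly") -> "'O''Reilly'"
        #   quote_value(True)       -> "1"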
if isinstance(value, (datetime.date, datetime.time, datetime.datetime)):
return "'%s'" % value
elif isinstance(value, six.string_types):
return "'%s'" % six.text_type(value).replace("\'", "\'\'")
elif isinstance(value, six.buffer_types):
return "'%s'" % force_text(binascii.hexlify(value))
elif isinstance(value, bool):
return "1" if value else "0"
else:
return str(value)
def delete_model(self, model):
# Run superclass action
super(DatabaseSchemaEditor, self).delete_model(model)
# Clean up any autoincrement trigger
self.execute("""
DECLARE
i INTEGER;
BEGIN
SELECT COUNT(*) INTO i FROM USER_CATALOG
WHERE TABLE_NAME = '%(sq_name)s' AND TABLE_TYPE = 'SEQUENCE';
IF i = 1 THEN
EXECUTE IMMEDIATE 'DROP SEQUENCE "%(sq_name)s"';
END IF;
END;
/""" % {'sq_name': self.connection.ops._get_sequence_name(model._meta.db_table)})
def alter_field(self, model, old_field, new_field, strict=False):
try:
super(DatabaseSchemaEditor, self).alter_field(model, old_field, new_field, strict)
except DatabaseError as e:
description = str(e)
# If we're changing type to an unsupported type we need a
# SQLite-ish workaround
if 'ORA-22858' in description or 'ORA-22859' in description:
self._alter_field_type_workaround(model, old_field, new_field)
else:
raise
def _alter_field_type_workaround(self, model, old_field, new_field):
"""
        Oracle refuses to change a column from certain types to certain other types.
What we need to do instead is:
- Add a nullable version of the desired field with a temporary name
- Update the table to transfer values from old to new
- Drop old column
- Rename the new column and possibly drop the nullable property
"""
# Make a new field that's like the new one but with a temporary
# column name.
new_temp_field = copy.deepcopy(new_field)
new_temp_field.null = True
new_temp_field.column = self._generate_temp_name(new_field.column)
# Add it
self.add_field(model, new_temp_field)
# Explicit data type conversion
# https://docs.oracle.com/cd/B19306_01/server.102/b14200/sql_elements002.htm#sthref340
new_value = self.quote_name(old_field.column)
old_type = old_field.db_type(self.connection)
if re.match('^N?CLOB', old_type):
new_value = "TO_CHAR(%s)" % new_value
old_type = 'VARCHAR2'
if re.match('^N?VARCHAR2', old_type):
new_internal_type = new_field.get_internal_type()
if new_internal_type == 'DateField':
new_value = "TO_DATE(%s, 'YYYY-MM-DD')" % new_value
elif new_internal_type == 'DateTimeField':
new_value = "TO_TIMESTAMP(%s, 'YYYY-MM-DD HH24:MI:SS.FF')" % new_value
elif new_internal_type == 'TimeField':
                    # TimeFields are stored as TIMESTAMP with a 1900-01-01 date part.
new_value = "TO_TIMESTAMP(CONCAT('1900-01-01 ', %s), 'YYYY-MM-DD HH24:MI:SS.FF')" % new_value
# Transfer values across
self.execute("UPDATE %s set %s=%s" % (
self.quote_name(model._meta.db_table),
self.quote_name(new_temp_field.column),
new_value,
))
# Drop the old field
self.remove_field(model, old_field)
# Rename and possibly make the new field NOT NULL
super(DatabaseSchemaEditor, self).alter_field(model, new_temp_field, new_field)
def normalize_name(self, name):
"""
Get the properly shortened and uppercased identifier as returned by
quote_name(), but without the actual quotes.
"""
nn = self.quote_name(name)
if nn[0] == '"' and nn[-1] == '"':
nn = nn[1:-1]
return nn
def _generate_temp_name(self, for_name):
"""
        Generate a temporary column name for workarounds that need temp columns.
"""
suffix = hex(hash(for_name)).upper()[1:]
return self.normalize_name(for_name + "_" + suffix)
def prepare_default(self, value):
return self.quote_value(value)
|
Jgarcia-IAS/Fidelizacion_odoo
|
refs/heads/master
|
openerp/addons/web_graph/__openerp__.py
|
376
|
{
'name': 'Graph Views',
'category': 'Hidden',
'description': """
Graph Views for Web Client.
===========================
* Parses a <graph> view, but allows dynamically changing the presentation
* Graph Types: pie, lines, areas, bars, radar
* Stacked/Not Stacked for areas and bars
* Legends: top, inside (top/left), hidden
* Features: download as PNG or CSV, browse data grid, switch orientation
* Unlimited "Group By" levels (not stacked), two cross level analysis (stacked)
""",
'version': '3.0',
'depends': ['web'],
'data' : [
'views/web_graph.xml',
],
'qweb' : [
'static/src/xml/*.xml',
],
'auto_install': True
}
|
AndresCidoncha/BubecasBot
|
refs/heads/master
|
telegram/update.py
|
1
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2016
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains a object that represents a Telegram Update."""
from telegram import (Message, TelegramObject, InlineQuery,
ChosenInlineResult, CallbackQuery)
class Update(TelegramObject):
"""This object represents a Telegram Update.
Attributes:
update_id (int):
message (:class:`telegram.Message`):
inline_query (:class:`telegram.InlineQuery`):
chosen_inline_result (:class:`telegram.ChosenInlineResult`):
callback_query (:class:`telegram.CallbackQuery`):
Args:
update_id (int):
**kwargs: Arbitrary keyword arguments.
Keyword Args:
message (Optional[:class:`telegram.Message`]):
inline_query (Optional[:class:`telegram.InlineQuery`]):
        chosen_inline_result (Optional[:class:`telegram.ChosenInlineResult`]):
callback_query (Optional[:class:`telegram.CallbackQuery`]):
"""
def __init__(self,
update_id,
**kwargs):
# Required
self.update_id = int(update_id)
# Optionals
self.message = kwargs.get('message')
self.inline_query = kwargs.get('inline_query')
self.chosen_inline_result = kwargs.get('chosen_inline_result')
self.callback_query = kwargs.get('callback_query')
@staticmethod
def de_json(data):
"""
Args:
data (dict):
Returns:
telegram.Update:
"""
if not data:
return None
data['message'] = Message.de_json(data.get('message'))
data['inline_query'] = InlineQuery.de_json(data.get('inline_query'))
data['chosen_inline_result'] = \
ChosenInlineResult.de_json(data.get('chosen_inline_result'))
data['callback_query'] = \
CallbackQuery.de_json(data.get('callback_query'))
return Update(**data)
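def _de_json_demo():
    """
    Editor's sketch with a minimal, hypothetical payload.  It assumes the
    nested de_json helpers (Message, InlineQuery, ...) return None for
    missing input, as they do elsewhere in this library.
    """
    update = Update.de_json({'update_id': 1})
    assert update.update_id == 1
    assert update.message is None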
|
kzietek/dmxAmbientLight
|
refs/heads/master
|
tplight.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Control class for TP-Link A19-LB130 RGBW WiFi bulb
'''
import datetime
import socket
import json
import sys
class LB130(object):
'''
Methods for controlling the LB130 bulb
'''
encryption_key = 0xAB
__udp_ip = "10.0.0.130"
__udp_port = 9999
__on_off = 0
__transition_period = 0
__hue = 0
__saturation = 0
__brightness = 0
__color_temp = 0
__connected = False
__alias = ""
device_id = ""
lamp_beam_angle = 0
min_voltage = 0
max_voltage = 0
wattage = 0
incandescent_equivalent = 0
max_lumens = 0
color_rendering_index = 0
# Public Methods
def __init__(self, ip_address):
'''
Initialise the bulb with an ip address
'''
# validate the ip address
ip_array = ip_address.split(".")
valid_ip = True
try:
if len(ip_array) == 4:
for ipval in ip_array:
if int(ipval) < 0 or int(ipval) > 255:
valid_ip = False
else:
valid_ip = False
except (RuntimeError, TypeError, ValueError):
valid_ip = False
if valid_ip:
self.__udp_ip = ip_address
# Parse the sysinfo JSON message to get the
# status of the various parameters
try:
data = json.loads(self.status())
col1 = 'system'
col2 = 'get_sysinfo'
col3 = 'light_state'
self.__alias = data[col1][col2]['alias']
self.__on_off = int(data[col1][col2][col3]['on_off'])
self.__hue = int(data[col1][col2][col3]['hue'])
self.__saturation = int(data[col1][col2][col3]['saturation'])
self.__brightness = int(data[col1][col2][col3]['brightness'])
self.__color_temp = int(data[col1][col2][col3]['color_temp'])
self.device_id = str(data[col1][col2]['deviceId'])
except (RuntimeError, TypeError, ValueError) as exception:
raise Exception(exception)
# Parse the light details JSON message to get the
# status of the various parameters
try:
data = json.loads(self.light_details())
col1 = 'smartlife.iot.smartbulb.lightingservice'
col2 = 'get_light_details'
inc = 'incandescent_equivalent'
colour = 'color_rendering_index'
self.lamp_beam_angle = int(data[col1][col2]['lamp_beam_angle'])
self.min_voltage = int(data[col1][col2]['min_voltage'])
self.max_voltage = int(data[col1][col2]['max_voltage'])
self.wattage = int(data[col1][col2]['wattage'])
self.incandescent_equivalent = int(data[col1][col2][inc])
self.max_lumens = int(data[col1][col2]['max_lumens'])
self.color_rendering_index = str(data[col1][col2][colour])
except (RuntimeError, TypeError, ValueError) as exception:
raise Exception(exception)
else:
raise ValueError('Invalid IPv4 IP address.')
def status(self):
'''
Get the connection status from the bulb
'''
message = "{\"system\":{\"get_sysinfo\":{}}}"
return self.__fetch_data(message)
def light_details(self):
'''
Get the light details from the bulb
'''
message = "{\"smartlife.iot.smartbulb.lightingservice\":\
{\"get_light_details\":\"\"}}"
return self.__fetch_data(message)
def on(self):
'''
Set the bulb to an on state
'''
        self.__on_off = 1
self.__update("{\"smartlife.iot.smartbulb.lightingservice\":{\"\
transition_light_state\":{\"ignore_default\":1,\"\
transition_period\":" +
str(self.__transition_period) + ",\"on_off\":1}}}")
def off(self):
'''
Set the bulb to an off state
'''
        self.__on_off = 0
self.__update("{\"smartlife.iot.smartbulb.lightingservice\":{\"\
transition_light_state\":{\"ignore_default\":1,\"transition_period\"\
:" + str(self.__transition_period) + ",\"on_off\":0}}}")
def reboot(self):
'''
Reboot the bulb
'''
self.__update("{\"smartlife.iot.common.system\":{\"reboot\":\
{\"delay\":1}}}")
@property
def alias(self):
'''
Get the device alias
'''
return self.__alias
@alias.setter
def alias(self, name):
'''
Set the device alias
'''
self.__update("{\"smartlife.iot.common.system\":{\"set_dev_alias\"\
:{\"alias\":\"" + name + "\"}}}")
@property
def time(self):
'''
Get the date and time from the device
'''
message = "{\"smartlife.iot.common.timesetting\":{\"get_time\":{}}}"
        data = json.loads(self.__fetch_data(message))
        col1 = 'smartlife.iot.common.timesetting'
        # Return a real datetime object built from the reply instead of
        # attaching attributes to the datetime module.
        return datetime.datetime(data[col1]['get_time']['year'],
                                 data[col1]['get_time']['month'],
                                 data[col1]['get_time']['mday'],
                                 data[col1]['get_time']['hour'],
                                 data[col1]['get_time']['min'],
                                 data[col1]['get_time']['sec'])
@time.setter
def time(self, date):
'''
Set the date and time on the device
'''
if isinstance(date, datetime.datetime):
self.__update("{\"smartlife.iot.common.timesetting\":{\"set_time\"\
:{\"year\":" + str(date.year) +
",\"month\":" + str(date.month) +
",\"mday\":" + str(date.day) +
",\"hour\":" + str(date.hour) +
",\"min\":" + str(date.minute) +
",\"sec\":" + str(date.second) +
"}}}")
else:
raise ValueError('Invalid type: must pass a datetime object')
return
@property
def timezone(self):
'''
Get the timezone from the device
'''
message = "{\"smartlife.iot.common.timesetting\":\
{\"get_timezone\":{}}}"
data = json.loads(self.__fetch_data(message))
col1 = 'smartlife.iot.common.timesetting'
timezone = data[col1]['get_timezone']['index']
return timezone
@timezone.setter
def timezone(self, timezone):
'''
Set the timezone on the device
'''
        if 0 <= timezone <= 109:
date = self.time
self.__update("{\"smartlife.iot.common.timesetting\":\
{\"set_timezone\":{\"index\":" + str(timezone) +
",\"year\":" + str(date.year) +
",\"month\":" + str(date.month) +
",\"mday\":" + str(date.day) +
",\"hour\":" + str(date.hour) +
",\"min\":" + str(date.minute) +
",\"sec\":" + str(date.second) + "}}}")
else:
raise ValueError('Timezone out of range: 0 to 109')
return
@property
def transition_period(self):
'''
Get the bulb transition period
'''
return self.__transition_period
@transition_period.setter
def transition_period(self, period):
'''
Set the bulb transition period
'''
        if 0 <= period <= 100000:
self.__transition_period = period
else:
raise ValueError('transition_period out of range: 0 to 100000')
@property
def hue(self):
'''
Get the bulb hue
'''
return self.__hue
@hue.setter
def hue(self, hue):
'''
Set the bulb hue
'''
        if 0 <= hue <= 360:
            self.__hue = hue
            self.__update("{\"smartlife.iot.smartbulb.lightingservice\":"
                          "{\"transition_light_state\":{\"ignore_default\":1,"
                          "\"transition_period\":" +
                          str(self.__transition_period) +
                          ",\"hue\":" + str(self.__hue) +
                          ",\"color_temp\":0}}}")
        else:
            raise ValueError('hue out of range: 0 to 360')
@property
def saturation(self):
'''
Get the bulb saturation
'''
return self.__saturation
@saturation.setter
def saturation(self, saturation):
'''
Set the bulb saturation
'''
        if 0 <= saturation <= 100:
            self.__saturation = saturation
            self.__update("{\"smartlife.iot.smartbulb.lightingservice\":"
                          "{\"transition_light_state\":{\"ignore_default\":1,"
                          "\"transition_period\":" + str(self.__transition_period) +
                          ",\"saturation\":" + str(self.__saturation) +
                          ",\"color_temp\":0}}}")
        else:
            raise ValueError('saturation value out of range: 0 to 100')
@property
def brightness(self):
'''
Get the bulb brightness
'''
return self.__brightness
@brightness.setter
def brightness(self, brightness):
'''
Set the bulb brightness
'''
        if 0 <= brightness <= 100:
            self.__brightness = brightness
            self.__update("{\"smartlife.iot.smartbulb.lightingservice\":"
                          "{\"transition_light_state\":{\"ignore_default\":1,"
                          "\"transition_period\":" + str(self.__transition_period) +
                          ",\"brightness\":" + str(self.__brightness) + "}}}")
        else:
            raise ValueError('brightness out of range: 0 to 100')
    def setHSL(self, h, s, l):
        '''
        Set hue, saturation and brightness in a single transition
        '''
        self.__brightness = l
        self.__saturation = s
        self.__hue = h
        self.__updateLite("{\"smartlife.iot.smartbulb.lightingservice\":"
                          "{\"transition_light_state\":{\"ignore_default\":1,"
                          "\"transition_period\":" + str(self.__transition_period) +
                          ",\"brightness\":" + str(self.__brightness) +
                          ",\"saturation\":" + str(self.__saturation) +
                          ",\"hue\":" + str(self.__hue) +
                          ",\"color_temp\":0}}}")
@property
def temperature(self):
'''
Get the bulb color temperature
'''
return self.__color_temp
@temperature.setter
def temperature(self, temperature):
'''
Set the bulb color temperature
'''
        if 0 <= temperature <= 7000:
            self.__color_temp = temperature
            self.__update("{\"smartlife.iot.smartbulb.lightingservice\":"
                          "{\"transition_light_state\":{\"ignore_default\":1,"
                          "\"transition_period\":" +
                          str(self.__transition_period) + ",\"color_temp\":" +
                          str(self.__color_temp) + "}}}")
        else:
            raise ValueError('temperature out of range: 0 to 7000')
# private methods
@staticmethod
def __encrypt(value, key):
'''
Encrypt the command string
'''
        valuelist = list(value)
        for i, char in enumerate(valuelist):
            var = ord(char)
            valuelist[i] = chr(var ^ int(key))
            key = ord(valuelist[i])
        if sys.version_info >= (3, 0):
            # Python 3: sockets require bytes rather than str
            return bytearray("".join(valuelist).encode("latin_1"))
        return "".join(valuelist)
@staticmethod
def __decrypt(value, key):
'''
Decrypt the command string
'''
        valuelist = list(value.decode("latin_1"))
        for i, char in enumerate(valuelist):
            var = ord(char)
            valuelist[i] = chr(var ^ key)
            key = var
        return "".join(valuelist)
def __updateLite(self, message):
'''
Update the bulbs status
'''
enc_message = self.__encrypt(message, self.encryption_key)
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(5)
sock.sendto(enc_message, (self.__udp_ip, self.__udp_port))
        except socket.error:
            raise RuntimeError("Error connecting to bulb")
def __update(self, message):
'''
Update the bulbs status
'''
enc_message = self.__encrypt(message, self.encryption_key)
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            sock.settimeout(5)
            sock.sendto(enc_message, (self.__udp_ip, self.__udp_port))
            dec_data = ""
            while True:
                data, addr = sock.recvfrom(1024)  # buffer size is 1024 bytes
                dec_data = self.__decrypt(data, self.encryption_key)
                if "}}}" in dec_data:  # end of the JSON reply
                    break
        except socket.error:
            # Only network failures are masked; device errors propagate below
            raise RuntimeError("Error connecting to bulb")
        if "\"err_code\":0" not in dec_data:
            raise RuntimeError("Bulb returned error: " + dec_data)
def __fetch_data(self, message):
'''
Fetch data from the device
'''
enc_message = self.__encrypt(message, self.encryption_key)
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            sock.settimeout(5)
            sock.sendto(enc_message, (self.__udp_ip, self.__udp_port))
            dec_data = ""
            while True:
                data, addr = sock.recvfrom(1024)  # buffer size is 1024 bytes
                dec_data = self.__decrypt(data, self.encryption_key)
                if "}}}" in dec_data:  # end of the JSON reply
                    break
        except socket.error:
            # Only network failures are masked; device errors propagate below
            raise RuntimeError("Error connecting to bulb")
        if "\"err_code\":0" not in dec_data:
            raise RuntimeError("Bulb returned error: " + dec_data)
        return dec_data
|
OTAkeys/RIOT
|
refs/heads/master
|
dist/tools/compile_and_test_for_board/compile_and_test_for_board.py
|
3
|
#! /usr/bin/env python3
"""
This script builds all applications and tests for one board and also
executes the tests when they are available.
An incremental build can be selected with `--incremental` so that
successful compilations and tests are not rerun. It should then only be
used on a fixed version of the repository, as no check is done that the
stored results are up to date with the RIOT repository.
By default it finds all applications in `examples` and `tests`, but the
list of applications can be given on the command line, together with an
exclude list, for example to avoid rerunning a long-failing test every
time.
It is a temporary solution until an equivalent is implemented in the
build system. It also showcases what the file output for compilation and
tests could look like.
Example
-------
By default it should be run as
./compile_and_test_for_board.py path_to_riot_directory board_name [results]
Usage
-----
```
usage: compile_and_test_for_board.py [-h] [--applications APPLICATIONS]
[--applications-exclude APPLICATIONS_EXCLUDE]
[--no-test] [--with-test-only]
[--loglevel {debug,info,warning,error,fatal,critical}]
[--incremental] [--clean-after]
[--compile-targets COMPILE_TARGETS]
[--flash-targets FLASH_TARGETS]
[--test-targets TEST_TARGETS]
[--test-available-targets TEST_AVAILABLE_TARGETS]
[--report-xml] [--jobs JOBS]
riot_directory board [result_directory]
positional arguments:
riot_directory RIOT directory to test
board Board to test
result_directory Result directory (default: results)
optional arguments:
-h, --help show this help message and exit
--applications APPLICATIONS
List of applications to test, overwrites default
configuration of testing all applications (default:
None)
--applications-exclude APPLICATIONS_EXCLUDE
List of applications to exclude from tested
applications. Also applied after "--applications".
(default: None)
--no-test Disable executing tests (default: False)
--with-test-only Only compile applications that have a test (default:
False)
--loglevel {debug,info,warning,error,fatal,critical}
Python logger log level (default: info)
--incremental Do not rerun successful compilation and tests
(default: False)
--clean-after Clean after running each test (default: False)
--compile-targets COMPILE_TARGETS
List of make targets to compile (default: clean all)
--flash-targets FLASH_TARGETS
List of make targets to flash (default: flash-only)
--test-targets TEST_TARGETS
List of make targets to run test (default: test)
--test-available-targets TEST_AVAILABLE_TARGETS
List of make targets to know if a test is present
(default: test/available)
--report-xml Output results to report.xml in the result_directory
(default: False)
  --jobs JOBS, -j JOBS  Parallel building (0 means no limit, like '--jobs')
(default: None)
```
""" # noqa
import os
import sys
import glob
import shutil
import logging
import argparse
import subprocess
import collections
try:
import junit_xml
import io
import time
except ImportError:
junit_xml = None
LOG_HANDLER = logging.StreamHandler()
LOG_HANDLER.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
LOG_LEVELS = ('debug', 'info', 'warning', 'error', 'fatal', 'critical')
MAKE = os.environ.get('MAKE', 'make')
class ErrorInTest(Exception):
"""Custom exception for a failed test.
It contains the step that failed in 'message', the 'application' and the
'errorfile' path to the execution error.
"""
def __init__(self, message, application, errorfile):
super().__init__(message)
self.application = application
self.errorfile = errorfile
def _expand_apps_directories(apps_dirs, riotdir, skip=False):
"""Expand the list of applications using wildcards."""
# Get the full list of RIOT applications in riotdir
_riot_applications = _riot_applications_dirs(riotdir)
if apps_dirs is None:
        if skip:
return []
return _riot_applications
ret = []
for app_dir in apps_dirs:
if os.path.isdir(app_dir):
# Case where the application directory exists: don't use globbing.
# the application directory can also be outside of riotdir and
# relative to it.
ret += [app_dir]
else:
ret += [
os.path.relpath(el, riotdir)
for el in glob.glob(os.path.join(riotdir, app_dir))
if os.path.relpath(el, riotdir) in _riot_applications
]
return ret
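# For example (hypothetical tree, assuming both applications are listed by
# 'make info-applications' in riotdir):
#     _expand_apps_directories(['tests/test_*'], riotdir)
#     -> ['tests/test_a', 'tests/test_b']
#     _expand_apps_directories(None, riotdir, skip=True)
#     -> []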
def apps_directories(riotdir, apps_dirs=None, apps_dirs_skip=None):
"""Return sorted list of test directories relative to `riotdir`.
By default it uses RIOT 'info-applications' command to list them.
:param riotdir: base riot directory
:param apps_dirs: use this applications list instead of the RIOT list
:param apps_dirs_skip: list of application directories to remove, applied
on the RIOT list or `apps_dirs`
"""
apps_dirs = apps_dirs or _riot_applications_dirs(riotdir)
apps_dirs_skip = apps_dirs_skip or []
# Remove applications to skip
apps_dirs = set(apps_dirs) - set(apps_dirs_skip)
return sorted(list(apps_dirs))
def _riot_applications_dirs(riotdir):
"""Applications directories in the RIOT repository with relative path."""
cmd = [MAKE, 'info-applications']
out = subprocess.check_output(cmd, cwd=riotdir)
out = out.decode('utf-8', errors='replace')
return out.split()
def check_is_board(riotdir, board):
"""Verify if board is a RIOT board.
:raises ValueError: on invalid board
:returns: board name
"""
if board == 'common':
raise ValueError("'%s' is not a board" % board)
board_dir = os.path.join(riotdir, 'boards', board)
if not os.path.isdir(board_dir):
raise ValueError("Cannot find '%s' in %s/boards" % (board, riotdir))
return board
def create_directory(directory, clean=False, mode=0o755):
"""Directory creation helper with `clean` option.
:param clean: tries deleting the directory before re-creating it
"""
if clean:
try:
shutil.rmtree(directory)
except OSError:
pass
os.makedirs(directory, mode=mode, exist_ok=True)
def is_in_directory(path, directory):
"""Return if `path` is inside `directory`.
>>> is_in_directory('RIOT/a/b/c', 'RIOT')
True
>>> is_in_directory('RIOT/../a', 'RIOT')
False
    # Also works if the path is absolute but the directory is not
>>> curdir = os.path.abspath(os.curdir)
>>> is_in_directory(os.path.join(curdir, 'RIOT', 'a'), 'RIOT')
True
"""
directory = os.path.abspath(directory)
path = os.path.abspath(path)
return path.startswith(directory)
class RIOTApplication():
"""RIOT Application representation.
Allows calling make commands on an application for a board.
:param board: board name
:param riotdir: RIOT repository directory
:param appdir: directory of the application, can be relative to riotdir
:param resultdir: base directory where to put execution results
:param junit: track application in JUnit XML
"""
MAKEFLAGS = ('RIOT_CI_BUILD=1', 'CC_NOCOLOR=1', '--no-print-directory')
COMPILE_TARGETS = ('clean', 'all',)
FLASH_TARGETS = ('flash-only',)
TEST_TARGETS = ('test',)
TEST_AVAILABLE_TARGETS = ('test/available',)
# pylint: disable=too-many-arguments
def __init__(self, board, riotdir, appdir, resultdir, junit=False):
self.board = board
self.riotdir = riotdir
self.appdir = appdir
self.resultdir = os.path.join(resultdir, appdir)
if junit:
if not junit_xml:
raise ImportError("`junit-xml` required for --report-xml")
self.testcase = junit_xml.TestCase(name=self.appdir,
stdout='', stderr='')
self.log_stream = io.StringIO()
logging.basicConfig(stream=self.log_stream)
else:
self.testcase = None
self.logger = logging.getLogger('%s.%s' % (board, appdir))
# Currently not handling absolute directories or outside of RIOT
assert is_in_directory(self.resultdir, resultdir), \
"Application result directory is outside main result directory"
# Extract values from make
def name(self):
"""Get application name."""
appname = self.make(['info-debug-variable-APPLICATION'],
log_error=True).strip()
self.logger.debug('APPLICATION: %s', appname)
return appname
def has_test(self):
"""Detect if the application has tests.
Check TEST_AVAILABLE_TARGETS execute without error.
"""
try:
self.make(self.TEST_AVAILABLE_TARGETS)
except subprocess.CalledProcessError:
has_test = False
else:
has_test = True
self.logger.info('Application has test: %s', has_test)
return has_test
def board_is_supported(self):
"""Return if current board is supported."""
env = {'BOARDS': self.board}
cmd = ['info-boards-supported']
ret = self.make(cmd, env=env, log_error=True).strip()
supported = ret == self.board
self.logger.info('Board supported: %s', supported)
return supported
def board_has_enough_memory(self):
"""Return if current board has enough memory."""
cmd = ['info-debug-variable-BOARD_INSUFFICIENT_MEMORY']
boards = self.make(cmd, log_error=True).strip().split()
has_enough_memory = self.board not in boards
self.logger.info('Board has enough memory: %s', has_enough_memory)
return has_enough_memory
def clean(self):
"""Clean build and packages."""
try:
cmd = ['clean', 'clean-pkg']
self.make(cmd)
except subprocess.CalledProcessError as err:
if self.testcase:
self.testcase.stderr += err.output + '\n'
self.logger.warning('Got an error during clean, ignore: %r', err)
def clean_intermediates(self):
"""Clean intermediates only."""
try:
cmd = ['clean-intermediates']
self.make(cmd)
except subprocess.CalledProcessError as err:
if self.testcase:
self.testcase.stderr += err.output + '\n'
self.logger.warning('Got an error during clean-intermediates,'
' ignore: %r', err)
def run_compilation_and_test(self, **test_kwargs):
"""Same as `compilation_and_test` but handles exception.
:returns: 0 on success and 1 on error.
"""
try:
if self.testcase:
self.testcase.timestamp = time.time()
self.compilation_and_test(**test_kwargs)
res = None
except ErrorInTest as err:
self.logger.error('Failed during: %s', err)
res = (str(err), err.application.appdir, err.errorfile)
if self.testcase:
self.testcase.elapsed_sec = time.time() - self.testcase.timestamp
self.testcase.log = self.log_stream.getvalue()
if not self.testcase.stdout:
self.testcase.stdout = None
if not self.testcase.stderr:
self.testcase.stderr = None
return res
def _skip(self, skip_reason, skip_reason_details=None, output=None):
if self.testcase:
self.testcase.add_skipped_info(
skip_reason_details if skip_reason_details else skip_reason,
output,
)
self._write_resultfile('skip', skip_reason)
def compilation_and_test(self, clean_after=False, runtest=True,
incremental=False, jobs=False,
with_test_only=False):
# pylint:disable=too-many-arguments
"""Compile and execute test if available.
Checks for board supported/enough memory, compiles.
If there are tests, also flash the device and run them.
Output files are written in `self.resultdir`
        When `clean_after` is set, clean intermediate files that are not
        required for the following steps. The elffile is kept after
        compiling in case the test is run later, and a full clean is done
        after the test succeeds.
:param incremental: Do not rerun successful compilation and tests
:raises ErrorInTest: on execution failed during one step
"""
# Ignore incompatible APPS
if not self.board_is_supported():
create_directory(self.resultdir, clean=True)
self._skip('not_supported', 'Board not supported')
return
if not self.board_has_enough_memory():
create_directory(self.resultdir, clean=True)
self._skip(
'not_enough_memory',
'Board has not enough memory to carry application',
)
return
has_test = self.has_test()
if with_test_only and not has_test:
create_directory(self.resultdir, clean=True)
self._skip(
'disabled_has_no_tests',
"{} has no tests".format(self.appdir)
)
return
# Normal case for supported apps
create_directory(self.resultdir, clean=not incremental)
# Run compilation and flash+test
# It raises ErrorInTest on error which is handled outside
compilation_cmd = list(self.COMPILE_TARGETS)
if jobs is not None:
compilation_cmd += ['--jobs']
if jobs:
compilation_cmd += [str(jobs)]
self.make_with_outfile('compilation', compilation_cmd)
if clean_after:
self.clean_intermediates()
if runtest:
if has_test:
setuptasks = collections.OrderedDict(
[('flash', self.FLASH_TARGETS)])
self.make_with_outfile('test', self.TEST_TARGETS,
save_output=True, setuptasks=setuptasks)
if clean_after:
self.clean()
else:
self._skip(
'skip.no_test',
"{} has no tests".format(self.appdir)
)
self.logger.info('Success')
def make(self, args, env=None, log_error=False):
"""Run make command in appdir."""
env = env or {}
        # HACK: BOARD must be set for make through the environment, not on
        # the command line, otherwise it breaks `BOARD=none` for global
        # commands
env['BOARD'] = self.board
full_env = os.environ.copy()
full_env.update(env)
cmd = [MAKE]
cmd.extend(self.MAKEFLAGS)
cmd.extend(['-C', os.path.join(self.riotdir, self.appdir)])
cmd.extend(args)
self.logger.debug('%r ENV %s', cmd, env)
# Call without 'universal_newlines' to have bytes and handle decoding
# (encoding and errors are only supported after python 3.6)
try:
out = subprocess.check_output(cmd, env=full_env,
stderr=subprocess.STDOUT)
out = out.decode('utf-8', errors='replace')
except subprocess.CalledProcessError as err:
err.output = err.output.decode('utf-8', errors='replace')
if log_error:
self.logger.error('Error during command: \n%s', err.output)
raise err
return out
def make_with_outfile(self, name, args, save_output=False,
setuptasks=None):
"""Run make but save result in an outfile.
It will be saved in `self.resultdir/name.[success|failure]`.
:param name: basename to use for the result file.
        :param save_output: if True, save the output in the outfile and
            return it; otherwise return an empty string.
:param setuptasks: OrderedDict of tasks to run before the main one
"""
self.logger.info('Run %s', name)
setuptasks = setuptasks or {}
# Do not re-run if success
output = self._make_get_previous_output(name)
if output is not None:
if self.testcase:
self.testcase.stdout += output + '\n'
return output
# Run setup-tasks, output is only kept in case of error
for taskname, taskargs in setuptasks.items():
taskname = '%s.%s' % (name, taskname)
self.logger.info('Run %s', taskname)
try:
self.make(taskargs)
except subprocess.CalledProcessError as err:
self._make_handle_error(taskname, err)
# Run make command
try:
output = self.make(args)
if not save_output:
output = ''
if self.testcase:
self.testcase.stdout += output + '\n'
self._write_resultfile(name, 'success', output)
return output
except subprocess.CalledProcessError as err:
self._make_handle_error(name, err)
def _make_get_previous_output(self, name):
"""Get previous result output for step `name`.
Returns `output` if it is there, None if not.
"""
try:
with open(self._outfile('%s.success' % name),
encoding='utf-8') as outputfd:
self.logger.info('Nothing to be done for %s', name)
return outputfd.read()
except OSError:
pass
return None
def _make_handle_error(self, name, err):
"""Handle exception during make step `name`."""
output = ' '.join(err.cmd) + '\n'
output += err.output + '\n'
output += 'Return value: %s\n' % err.returncode
outfile = self._write_resultfile(name, 'failed', output)
self.logger.warning(output)
self.logger.error('Error during %s, writing to %s', name, outfile)
if self.testcase:
self.testcase.stderr += err.output + '\n'
if name == "test":
self.testcase.add_failure_info("{} failed".format(err.cmd),
err.output)
else:
self.testcase.add_error_info("{} had an error".format(err.cmd),
err.output)
raise ErrorInTest(name, self, outfile)
def _write_resultfile(self, name, status, body=''):
"""Write `body` to result file `name.status`.
It also deletes other `name.*` files before.
"""
# Delete previous status files
resultfiles = glob.glob(self._outfile('%s.*' % name))
for resultfile in resultfiles:
try:
os.remove(resultfile)
except OSError:
pass
# Create new file
filename = '%s.%s' % (name, status)
outfile = self._outfile(filename)
with open(outfile, 'w+', encoding='utf-8',
errors='replace') as outfd:
outfd.write(body)
outfd.flush()
return outfile
def _outfile(self, filename):
"""Give path to `filename` with `self.resultdir`."""
return os.path.join(self.resultdir, filename)
TOOLCHAIN_SCRIPT = 'dist/tools/ci/print_toolchain_versions.sh'
def print_toolchain(riotdir):
"""Print toolchain using RIOT script.
Does not handle any execution error
"""
toolchain_script = os.path.join(riotdir, TOOLCHAIN_SCRIPT)
out = subprocess.check_output([toolchain_script])
return out.decode('utf-8', errors='replace')
def save_toolchain(riotdir, resultdir):
"""Save toolchain in 'resultdir/toolchain'."""
outfile = os.path.join(resultdir, 'toolchain')
create_directory(resultdir)
toolchain = print_toolchain(riotdir)
with open(outfile, 'w+', encoding='utf-8', errors='replace') as outputfd:
outputfd.write(toolchain)
def _test_failed_summary(errors, relpathstart=None):
"""Generate a test summary for failures."""
if not errors:
return ''
errors_dict = {}
for step, appdir, errorfile in errors:
if relpathstart:
errorfile = os.path.relpath(errorfile, relpathstart)
errors_dict.setdefault(step, []).append((appdir, errorfile))
summary = ''
for step, errs in sorted(errors_dict.items()):
summary += 'Failures during %s:\n' % step
for appdir, errorfile in errs:
summary += '- [%s](%s)\n' % (appdir, errorfile)
# Separate sections with a new line
summary += '\n'
# Remove last new line
summary = summary[:-1]
return summary
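# Hypothetical example of the generated markdown, for a single failure
# tuple ('compilation', 'tests/foo', 'res/comp.failed'):
#     Failures during compilation:
#     - [tests/foo](res/comp.failed)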
def save_failure_summary(resultdir, summary):
"""Save test summary in 'resultdir/board/failuresummary'."""
outfile = os.path.join(resultdir, 'failuresummary.md')
with open(outfile, 'w+', encoding='utf-8', errors='replace') as outputfd:
outputfd.write(summary)
# Parsing functions
def list_from_string(list_str=None):
"""Get list of items from `list_str`
>>> list_from_string(None)
[]
>>> list_from_string("")
[]
>>> list_from_string(" ")
[]
>>> list_from_string("a")
['a']
>>> list_from_string("a ")
['a']
>>> list_from_string("a b c")
['a', 'b', 'c']
"""
value = (list_str or '').split(' ')
return [v for v in value if v]
def _strip_board_equal(board):
"""Sanitizy board if given as BOARD=board.
Increase RIOT compatibility.
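    For example:
    >>> _strip_board_equal('BOARD=native')
    'native'
    >>> _strip_board_equal('native')
    'native'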
"""
if board.startswith('BOARD='):
board = board.replace('BOARD=', '')
return board
PARSER = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
PARSER.add_argument('riot_directory', help='RIOT directory to test')
PARSER.add_argument('board', help='Board to test', type=_strip_board_equal)
PARSER.add_argument('result_directory', nargs='?', default='results',
help='Result directory')
PARSER.add_argument(
'--applications', type=list_from_string,
help=('List of applications to test, overwrites default configuration of'
' testing all applications'),
)
PARSER.add_argument(
'--applications-exclude', type=list_from_string,
help=('List of applications to exclude from tested applications.'
' Also applied after "--applications".'),
)
PARSER.add_argument('--no-test', action='store_true', default=False,
help='Disable executing tests')
PARSER.add_argument('--with-test-only', action='store_true', default=False,
help='Only compile applications that have a test')
PARSER.add_argument('--loglevel', choices=LOG_LEVELS, default='info',
help='Python logger log level')
PARSER.add_argument('--incremental', action='store_true', default=False,
help='Do not rerun successful compilation and tests')
PARSER.add_argument('--clean-after', action='store_true', default=False,
help='Clean after running each test')
PARSER.add_argument('--compile-targets', type=list_from_string,
default=' '.join(RIOTApplication.COMPILE_TARGETS),
help='List of make targets to compile')
PARSER.add_argument('--flash-targets', type=list_from_string,
default=' '.join(RIOTApplication.FLASH_TARGETS),
help='List of make targets to flash')
PARSER.add_argument('--test-targets', type=list_from_string,
default=' '.join(RIOTApplication.TEST_TARGETS),
help='List of make targets to run test')
PARSER.add_argument('--test-available-targets', type=list_from_string,
default=' '.join(RIOTApplication.TEST_AVAILABLE_TARGETS),
help='List of make targets to know if a test is present')
PARSER.add_argument('--report-xml', action='store_true', default=False,
help='Output results to report.xml in the '
'result_directory')
PARSER.add_argument(
'--jobs', '-j', type=int, default=None,
help="Parallel building (0 means not limit, like '--jobs')")
def main(args):
"""For one board, compile all examples and tests and run test on board."""
logger = logging.getLogger(args.board)
if args.loglevel:
loglevel = logging.getLevelName(args.loglevel.upper())
logger.setLevel(loglevel)
logger.addHandler(LOG_HANDLER)
logger.info('Saving toolchain')
save_toolchain(args.riot_directory, args.result_directory)
board = check_is_board(args.riot_directory, args.board)
logger.debug('board: %s', board)
# Expand application directories: allows use of glob in application names
apps_dirs = _expand_apps_directories(args.applications,
args.riot_directory)
apps_dirs_skip = _expand_apps_directories(args.applications_exclude,
args.riot_directory, skip=True)
app_dirs = apps_directories(args.riot_directory, apps_dirs=apps_dirs,
apps_dirs_skip=apps_dirs_skip)
logger.debug('app_dirs: %s', app_dirs)
logger.debug('resultdir: %s', args.result_directory)
board_result_directory = os.path.join(args.result_directory, args.board)
# Overwrite the compile/test targets from command line arguments
RIOTApplication.COMPILE_TARGETS = args.compile_targets
RIOTApplication.FLASH_TARGETS = args.flash_targets
RIOTApplication.TEST_TARGETS = args.test_targets
RIOTApplication.TEST_AVAILABLE_TARGETS = args.test_available_targets
# List of applications for board
applications = [RIOTApplication(board, args.riot_directory, app_dir,
board_result_directory,
junit=args.report_xml)
for app_dir in app_dirs]
# Execute tests
errors = [app.run_compilation_and_test(clean_after=args.clean_after,
runtest=not args.no_test,
incremental=args.incremental,
jobs=args.jobs,
with_test_only=args.with_test_only)
for app in applications]
errors = [e for e in errors if e is not None]
num_errors = len(errors)
summary = _test_failed_summary(errors, relpathstart=board_result_directory)
save_failure_summary(board_result_directory, summary)
if args.report_xml:
if not junit_xml:
raise ImportError("`junit-xml` required for --report-xml")
report_file = os.path.join(board_result_directory, "report.xml")
with open(report_file, "w+") as report:
junit_xml.TestSuite.to_file(
report,
[junit_xml.TestSuite('compile_and_test_for_{}'.format(board),
[app.testcase for app in applications])]
)
if num_errors:
logger.error('Tests failed: %d', num_errors)
print(summary, end='')
else:
logger.info('Tests successful')
sys.exit(num_errors)
if __name__ == '__main__':
main(PARSER.parse_args())
|
westernx/mayatools
|
refs/heads/master
|
mayatools/mcc.py
|
1
|
import os
import struct
import glob
class ParseError(RuntimeError):
pass
_get_channels_results = {}
def get_channels(xml_path, memoize=True):
"""Get a list of channel names and their point counts from a Maya MCC cache.
:param str xml_path: The XML file for the given cache.
:param bool memoize: Use memoization to avoid parsing?
:return: List of ``(name, size)`` tuples for each channel.
:raises ParseError:
"""
mcc_paths = glob.glob(os.path.join(os.path.dirname(xml_path), os.path.splitext(os.path.basename(xml_path))[0] + 'Frame*.mc'))
if not mcc_paths:
raise ParseError('Could not find any *.mc for %r' % xml_path)
mcc_path = mcc_paths[0]
stat = os.stat(mcc_path)
# Return memoized results.
if (mcc_path in _get_channels_results and
_get_channels_results[mcc_path][0] == stat.st_size and
_get_channels_results[mcc_path][1] == stat.st_mtime
):
# Return a copy of the list.
return list(_get_channels_results[mcc_path][2])
fh = open(mcc_path, 'rb')
# File header block.
tag = fh.read(4)
if tag != 'FOR4':
        raise ParseError('bad FOR4 tag %r @ %x' % (tag, fh.tell()))
offset = struct.unpack('>i', fh.read(4))[0]
fh.seek(offset, 1)
# Channel data block.
tag = fh.read(4)
if tag != 'FOR4':
        raise ParseError('bad FOR4 tag %r @ %x' % (tag, fh.tell()))
# Start of channel data.
offset = struct.unpack('>i', fh.read(4))[0]
tag = fh.read(4)
if tag != 'MYCH':
        raise ParseError('bad MYCH tag %r @ %x' % (tag, fh.tell()))
channels = []
while True:
# Channel name.
tag = fh.read(4)
if not tag:
# We have reached the end of the file, and so we are done.
break
if tag != 'CHNM':
            raise ParseError('bad CHNM tag %r @ %x' % (tag, fh.tell()))
name_size = struct.unpack('>i', fh.read(4))[0]
name = fh.read(name_size)[:-1]
# The stored name is padded to the next 4-byte boundary.
mask = 3
padded = (name_size + mask) & (~mask)
padding = padded - name_size
if padding:
fh.seek(padding, 1)
# Channel size (e.g. point count).
tag = fh.read(4)
if tag != 'SIZE':
            raise ParseError('bad SIZE tag %r @ %x' % (tag, fh.tell()))
point_count_size = struct.unpack('>i', fh.read(4))[0]
if point_count_size != 4:
            raise ParseError('bad point_count_size %r @ %x' % (point_count_size, fh.tell()))
point_count = struct.unpack('>i', fh.read(point_count_size))[0]
channels.append((name, point_count))
# Skip the actual data.
tag = fh.read(4)
if tag == 'FVCA':
fh.seek(3 * 4 * point_count + 4, 1)
elif tag == 'DVCA':
fh.seek(3 * 8 * point_count + 4, 1)
else:
            raise ParseError('bad FVCA/DVCA tag %r @ %x' % (tag, fh.tell()))
# Memoize the result.
_get_channels_results[mcc_path] = (stat.st_size, stat.st_mtime, channels)
return channels
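# --- Editor's sketch: hypothetical usage ---
# Given a Maya cache description such as 'myCache.xml' sitting next to its
# 'myCacheFrame*.mc' data files, print each channel and its point count.
# The wrapper name and path are examples, not part of this module's API.
def _print_channels(xml_path):
    '''Minimal demo wrapper around get_channels.'''
    for name, count in get_channels(xml_path):
        print '%s: %d points' % (name, count)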
|
timlinux/inasafe
|
refs/heads/develop
|
safe/processors/post_processors.py
|
2
|
# coding=utf-8
# pylint: disable=pointless-string-statement
# This is disabled for typehinting docstring.
"""Definitions relating to post-processing."""
import logging
from safe.definitions.exposure import exposure_place
from safe.definitions.extra_keywords import (
extra_keyword_earthquake_longitude,
extra_keyword_earthquake_latitude
)
from safe.definitions.fields import (
bearing_field,
direction_field,
distance_field,
feature_rate_field,
feature_value_field,
size_field,
hazard_class_field,
affected_field
)
from safe.definitions.hazard import hazard_earthquake
from safe.definitions.hazard_classifications import not_exposed_class
from safe.processors.post_processor_functions import (
calculate_bearing,
calculate_cardinality,
calculate_distance,
multiply,
size,
post_processor_affected_function)
from safe.processors.post_processor_inputs import (
geometry_property_input_type,
layer_property_input_type,
size_calculator_input_value,
keyword_input_type,
field_input_type,
keyword_value_expected)
from safe.utilities.i18n import tr
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "info@inasafe.org"
__revision__ = '$Format:%H$'
LOGGER = logging.getLogger('InaSAFE')
formula_process = {
'key': 'formula',
'description': tr(
'This type of process is a formula which is interpreted and executed '
'by the post processor.')
}
function_process = {
'key': 'function',
'description': tr(
'This type of process takes inputs as arguments and processes them '
'by passing them to a Python function.')
}
post_processor_process_types = [
formula_process, function_process
]
post_processor_size = {
'key': 'post_processor_size',
'name': tr('Size Value Post Processor'),
'description': tr(
u'A post processor to calculate the size of the feature. The unit is '
u'defined in the exposure definition.'),
'input': {
'size_calculator': {
'type': layer_property_input_type,
'value': size_calculator_input_value,
},
'geometry': {
'type': geometry_property_input_type
}
},
'output': {
'size': {
'value': size_field,
'type': function_process,
'function': size
}
}
}
post_processor_distance = {
'key': 'post_processor_distance',
'name': tr('Distance Post Processor'),
'description': tr(
'A post processor to calculate the distance between two points.'),
'input': {
'distance_calculator': {
'type': layer_property_input_type,
'value': size_calculator_input_value,
},
'place_geometry': {
'type': geometry_property_input_type
},
'latitude': {
'type': keyword_input_type,
'value': [
'hazard_keywords',
'extra_keywords',
extra_keyword_earthquake_latitude['key']
]
},
'longitude': {
'type': keyword_input_type,
'value': [
'hazard_keywords',
'extra_keywords',
extra_keyword_earthquake_longitude['key']
]
},
'earthquake_hazard': {
'type': keyword_value_expected,
'value': ['hazard_keywords', 'hazard'],
'expected_value': hazard_earthquake['key']
},
'place_exposure': {
'type': keyword_value_expected,
'value': ['exposure_keywords', 'exposure'],
'expected_value': exposure_place['key']
}
},
'output': {
'size': {
'value': distance_field,
'type': function_process,
'function': calculate_distance
}
}
}
post_processor_bearing = {
'key': 'post_processor_bearing',
'name': tr('Bearing Angle Post Processor'),
'description': tr(
'A post processor to calculate the bearing angle between two points.'
),
'input': {
'place_geometry': {
'type': geometry_property_input_type
},
'latitude': {
'type': keyword_input_type,
'value': [
'hazard_keywords',
'extra_keywords',
extra_keyword_earthquake_latitude['key']
]
},
'longitude': {
'type': keyword_input_type,
'value': [
'hazard_keywords',
'extra_keywords',
extra_keyword_earthquake_longitude['key']
]
},
'earthquake_hazard': {
'type': keyword_value_expected,
'value': ['hazard_keywords', 'hazard'],
'expected_value': hazard_earthquake['key']
},
'place_exposure': {
'type': keyword_value_expected,
'value': ['exposure_keywords', 'exposure'],
'expected_value': exposure_place['key']
}
},
'output': {
'size': {
'value': bearing_field,
'type': function_process,
'function': calculate_bearing
}
}
}
post_processor_cardinality = {
'key': 'post_processor_cardinality',
'name': tr('Cardinality Post Processor'),
'description': tr(
'A post processor to calculate the cardinality of an angle.'
),
'input': {
'angle': {
'type': field_input_type,
'value': bearing_field
},
'earthquake_hazard': {
'type': keyword_value_expected,
'value': ['hazard_keywords', 'hazard'],
'expected_value': hazard_earthquake['key']
},
'place_exposure': {
'type': keyword_value_expected,
'value': ['exposure_keywords', 'exposure'],
'expected_value': exposure_place['key']
}
},
'output': {
'size': {
'value': direction_field,
'type': function_process,
'function': calculate_cardinality
}
}
}
post_processor_size_rate = {
'key': 'post_processor_size_rate',
'name': tr('Size Rate Post Processor'),
'description': tr(
u'A post processor to calculate the value of a feature based on its '
u'size. If a feature is a polygon the size is calculated as '
u'the area in m². If the feature is a line we use length in metres.'),
'input': {
'size': {
'type': field_input_type,
'value': size_field,
},
'rate': {
'value': feature_rate_field,
'type': field_input_type
}
},
'output': {
'elderly': {
'value': feature_value_field,
'type': function_process,
'function': multiply
}
}
}
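# A 'function' process resolves each declared input and passes it to the
# given Python callable. For post_processor_size_rate above, the effect is,
# schematically (names are illustrative):
#     feature_value = multiply(size, rate)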
# We can access a specific keyword by specifying a list of keys to reach the
# keyword.
# For instance ['hazard_keywords', 'classification'].
post_processor_affected = {
'key': 'post_processor_affected',
'name': tr('Affected Post Processor'),
    'description': tr(
        'A post processor to determine whether a feature is affected or '
        'not (according to the hazard classification). The result can '
        'also be "{not_exposed_value}".').format(
        not_exposed_value=not_exposed_class['key']
    ),
'input': {
'hazard_class': {
'value': hazard_class_field,
'type': field_input_type,
},
'exposure': {
'type': keyword_input_type,
'value': ['exposure_keywords', 'exposure'],
},
'classification': {
'type': keyword_input_type,
'value': ['hazard_keywords', 'classification'],
},
'hazard': {
'type': keyword_input_type,
'value': ['hazard_keywords', 'hazard'],
},
},
'output': {
'affected': {
'value': affected_field,
'type': function_process,
'function': post_processor_affected_function
}
}
}
|
apagac/cfme_tests
|
refs/heads/master
|
cfme/tests/containers/test_manageiq_ansible_custom_attributes.py
|
3
|
import pytest
from cfme.containers.provider import ContainersProvider
from cfme.utils.ansible import create_tmp_directory
from cfme.utils.ansible import fetch_miq_ansible_module
from cfme.utils.ansible import remove_tmp_files
from cfme.utils.ansible import run_ansible
from cfme.utils.ansible import setup_ansible_script
from cfme.utils.appliance.implementations.ui import navigate_to
pytestmark = [
pytest.mark.usefixtures('setup_provider'),
pytest.mark.tier(1),
pytest.mark.provider([ContainersProvider], scope='function')]
custom_attributes_to_add = {
'name': 'custom1',
'value': 'first value'
}, {
'name': 'custom2',
'value': 'second value'
}
custom_attributes_to_edit = {
'name': 'custom1',
'value': 'third value'
}, {
'name': 'custom2',
'value': 'fourth value'
}
@pytest.fixture(scope='function')
def ansible_custom_attributes():
create_tmp_directory()
fetch_miq_ansible_module()
yield
remove_tmp_files()
def verify_custom_attributes(appliance, provider, custom_attributes_to_verify):
view = navigate_to(provider, 'Details', force=True)
assert view.entities.summary('Custom Attributes').is_displayed
for custom_attribute in custom_attributes_to_verify:
        assert (
            str(view.entities.summary('Custom Attributes').get_text_of(custom_attribute['name'])) ==
            str(custom_attribute['value']))
def test_manageiq_ansible_add_custom_attributes(appliance, ansible_custom_attributes, provider):
"""This test checks adding a Custom Attribute using Ansible script via Manage IQ module
Steps:
1. 'add_custom_attributes.yml script runs against the appliance
and adds custom attributes
2. Test navigates to Providers page and verifies the Custom Attributes
were added under Providers menu
Polarion:
assignee: juwatts
caseimportance: medium
casecomponent: Containers
initialEstimate: 1/6h
"""
setup_ansible_script(provider, script='add_custom_attributes',
values_to_update=custom_attributes_to_add,
script_type='custom_attributes')
run_ansible('add_custom_attributes')
verify_custom_attributes(appliance=appliance,
provider=provider,
custom_attributes_to_verify=custom_attributes_to_add)
setup_ansible_script(provider, script='remove_custom_attributes',
values_to_update=custom_attributes_to_add,
script_type='custom_attributes')
run_ansible('remove_custom_attributes')
view = navigate_to(provider, 'Details', force=True)
assert not view.entities.summary('Custom Attributes').is_displayed
def test_manageiq_ansible_edit_custom_attributes(appliance, ansible_custom_attributes, provider):
"""This test checks editing a Custom Attribute using Ansible script via Manage IQ module
Steps:
1. 'add_custom_attributes.yml script runs against the appliance
and edits custom attributes
2. Test navigates to Providers page and verifies the Custom Attributes
were edited under Providers menu
Polarion:
assignee: juwatts
caseimportance: medium
casecomponent: Containers
initialEstimate: 1/6h
"""
setup_ansible_script(provider, script='add_custom_attributes',
values_to_update=custom_attributes_to_edit,
script_type='custom_attributes')
run_ansible('add_custom_attributes')
verify_custom_attributes(appliance=appliance,
provider=provider,
custom_attributes_to_verify=custom_attributes_to_edit)
setup_ansible_script(provider, script='remove_custom_attributes',
values_to_update=custom_attributes_to_edit,
script_type='custom_attributes')
run_ansible('remove_custom_attributes')
view = navigate_to(provider, 'Details', force=True)
assert not view.entities.summary('Custom Attributes').is_displayed
def test_manageiq_ansible_add_custom_attributes_same_name(appliance, ansible_custom_attributes,
provider):
"""This test checks adding a Custom Attribute with the same name
    using Ansible script via ManageIQ module
Steps:
1. 'add_custom_attributes_same_name.yml script runs against the appliance
and adds same custom attributes that were already used
2. Test navigates to Providers page and verifies the Custom Attributes
weren't added under Providers menu
Polarion:
assignee: juwatts
caseimportance: medium
casecomponent: Containers
initialEstimate: 1/6h
"""
setup_ansible_script(provider, script='add_custom_attributes',
values_to_update=custom_attributes_to_edit,
script_type='custom_attributes')
run_ansible('add_custom_attributes')
run_ansible('add_custom_attributes')
verify_custom_attributes(appliance=appliance,
provider=provider,
custom_attributes_to_verify=custom_attributes_to_edit)
setup_ansible_script(provider, script='remove_custom_attributes',
values_to_update=custom_attributes_to_edit,
script_type='custom_attributes')
run_ansible('remove_custom_attributes')
view = navigate_to(provider, 'Details', force=True)
assert not view.entities.summary('Custom Attributes').is_displayed
def test_manageiq_ansible_add_custom_attributes_bad_user(appliance, ansible_custom_attributes,
provider):
"""This test checks adding a Custom Attribute with a bad user name
    using Ansible script via ManageIQ module
Steps:
1. 'add_custom_attributes_bad_user.yml script runs against the appliance
and tries to add custom attributes.
2. Verify error message with Ansible reply
3. Test navigates to Providers page and verifies the Custom Attributes
weren't added under Providers menu
Polarion:
assignee: juwatts
caseimportance: medium
casecomponent: Containers
initialEstimate: 1/6h
"""
setup_ansible_script(provider, script='add_custom_attributes_bad_user',
values_to_update=custom_attributes_to_edit,
script_type='custom_attributes')
run_result = run_ansible('add_custom_attributes_bad_user')
assert 'Authentication failed' in run_result
view = navigate_to(provider, 'Details', force=True)
assert not view.entities.summary('Custom Attributes').is_displayed
@pytest.mark.usefixtures('setup_provider')
def test_manageiq_ansible_remove_custom_attributes(appliance, ansible_custom_attributes, provider):
"""This test checks removing Custom Attribute using Ansible script via Manage IQ module
Steps:
1. 'remove_custom_attributes.yml script runs against the appliance
and removes custom attributes
2. Test navigates to Providers page and verifies the Custom Attributes
were removed under Providers menu
Polarion:
assignee: juwatts
caseimportance: medium
casecomponent: Containers
initialEstimate: 1/6h
"""
setup_ansible_script(provider, script='add_custom_attributes',
values_to_update=custom_attributes_to_add,
script_type='custom_attributes')
run_ansible('add_custom_attributes')
setup_ansible_script(provider, script='remove_custom_attributes',
values_to_update=custom_attributes_to_add,
script_type='custom_attributes')
run_ansible('remove_custom_attributes')
view = navigate_to(provider, 'Details', force=True)
assert not view.entities.summary('Custom Attributes').is_displayed
|
junghans/espressopp
|
refs/heads/master
|
src/analysis/Energy.py
|
2
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
******************************
**espressopp.analysis.Energy**
******************************
.. function:: espressopp.analysis.EnergyPot(system, per_atom)
:param system:
:param per_atom: (default: False)
:type system:
:type per_atom:
.. function:: espressopp.analysis.EnergyPot.compute()
:rtype:
.. function:: espressopp.analysis.EnergyKin(system, per_atom)
:param system:
:param per_atom: (default: False)
:type system:
:type per_atom:
.. function:: espressopp.analysis.EnergyKin.compute()
:rtype:
.. function:: espressopp.analysis.EnergyTot(system, per_atom)
:param system:
:param per_atom: (default: False)
:type system:
:type per_atom:
.. function:: espressopp.analysis.EnergyTot.compute()
:rtype:
"""
import espressopp
class EnergyPot():
def __init__(self, system, per_atom=False):
self.system = system
self.per_atom = per_atom
def compute(self):
EPot = 0.0
for k in range(self.system.getNumberOfInteractions()):
EPot += self.system.getInteraction(k).computeEnergy()
if self.per_atom:
NPart = espressopp.analysis.NPart(self.system).compute()
return EPot / NPart
else:
return EPot
class EnergyKin():
def __init__(self, system, per_atom=False):
self.system = system
self.per_atom = per_atom
def compute(self):
NPart = espressopp.analysis.NPart(self.system).compute()
T = espressopp.analysis.Temperature(self.system).compute()
EKin = (3.0/2.0) * NPart * T
if self.per_atom:
return EKin / NPart
else:
return EKin
class EnergyTot():
def __init__(self, system, per_atom=False):
self.system = system
self.per_atom = per_atom
def compute(self):
NPart = espressopp.analysis.NPart(self.system).compute()
T = espressopp.analysis.Temperature(self.system).compute()
EKin = (3.0/2.0) * NPart * T
EPot = 0.0
for k in range(self.system.getNumberOfInteractions()):
EPot += self.system.getInteraction(k).computeEnergy()
if self.per_atom:
return (EPot + EKin) / NPart
else:
return (EPot + EKin)
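# A minimal usage sketch (the `system` object is assumed to be an already
# configured espressopp system; per_atom=True divides by the particle
# count). The helper name is hypothetical, for illustration only.
def _report_energies(system):
    epot = EnergyPot(system, per_atom=True).compute()
    ekin = EnergyKin(system, per_atom=True).compute()
    etot = EnergyTot(system, per_atom=True).compute()
    return epot, ekin, etot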
|
mbevilacqua/appcompatprocessor
|
refs/heads/master
|
test/auxTest.py
|
1
|
import os
import ntpath
import settings
import random
import tempfile
import appDB
import unicodedata
import logging
from collections import defaultdict
from appAux import update_progress
try:
    from faker import Factory
    from faker.providers import BaseProvider
except ImportError:
    if settings.__FAKER__:
        settings.__FAKER__ = False
    raise
else:
    settings.__FAKER__ = True
settings.init()
logger = logging.getLogger(__name__)
class WeightedRandomizer:
    def __init__(self, weights):
        self.__max = .0
        self.__weights = []
        for value, weight in weights.items():
            self.__max += weight
            self.__weights.append((self.__max, value))
    def random(self):
        r = random.random() * self.__max
        for ceil, value in self.__weights:
            if ceil > r:
                return int(value)
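# Editor's sketch (hypothetical helper, not used by the tests): drawing
# many samples approximates the normalized weights, here ~75% / 20% / 5%.
def _weighted_randomizer_demo(samples=1000):
    wr = WeightedRandomizer({'1': 75.0, '2': 20.0, '3': 5.0})
    counts = defaultdict(int)
    for _ in xrange(samples):
        counts[wr.random()] += 1
    return counts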
class ACPProvider(BaseProvider):
def gen_filename(self):
file_extensions = ['bat','cmd','com','cpl','dat','dll','exe','msc','msi','scr','tmp','vbs']
return os.path.splitext(fake.file_name(category=None, extension=None))[0] + "." + random.choice(file_extensions)
def path(self):
data_drives = ['C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'D', 'E', 'F']
data_paths = {}
data_paths[''] = ['$Recycle.Bin','$RECYCLE.BIN','Documents and Settings', 'Users', 'EMC','EMC Files','EMC Reports','hp','Inetpub','Program Files','Program Files (x86)','ProgramData','Windows','Windows.old','Winnt']
data_paths['Windows'] = ['Application', 'ccmcache', 'ccmsetup', 'Cluster', 'discagnt', 'Installer', 'Microsoft', 'PCHealth','SoftwareDistribution', 'System32', 'SysWOW64', 'Temp', 'WinSxS']
data_paths['Windows.old'] = ['Application', 'ccmcache', 'ccmsetup', 'Cluster', 'discagnt', 'Installer', 'Microsoft', 'PCHealth','SoftwareDistribution', 'System32', 'SysWOW64', 'Temp', 'WinSxS']
data_paths['Winnt'] = ['Application', 'ccmcache', 'ccmsetup', 'Cluster', 'discagnt', 'Installer', 'Microsoft', 'PCHealth','SoftwareDistribution', 'System32', 'SysWOW64', 'Temp', 'WinSxS']
data_paths['Program Files'] = ['Adobe','Altiris','Apache','Blue','BMC','Citrix','Common','Dell','DisplayLink','EMC','HBAnyware','HP','IBM','IIS','InstallShield','Internet','Java','JBoss','Legato','Mirosoft','Mozilla','MySQL','OmniBack','Outlook','Reuters','RSA','SAS','SmartDraw','SplunkUniversalForwarder','Symantec','Symmetricom','System','TeraCopy','Trend','Tripwie','Unlocker','Virtual','VMware','WinAutomation','Windows','WinRAR','WinSCP','WinZip','Wireshark']
data_paths['Program Files (x86)'] = ['Adobe','Altiris','Apache','Blue','BMC','Citrix','Common','Dell','DisplayLink','EMC','HBAnyware','HP','IBM','IIS','InstallShield','Internet','Java','JBoss','Legato','Mirosoft','Mozilla','MySQL','OmniBack','Outlook','Reuters','RSA','SAS','SmartDraw','SplunkUniversalForwarder','Symantec','Symmetricom','System','TeraCopy','Trend','Tripwie','Unlocker','Virtual','VMware','WinAutomation','Windows','WinRAR','WinSCP','WinZip','Wireshark']
data_paths['Documents and Settings'] = ['Administrator','All Users','Default User']
data_paths['Users'] = ['Administrator','All Users','Default User']
data_paths['Administrator'] = ['Application Data','Desktop','Local Settings','My Documents','Start Menu']
data_paths['All Users'] = ['Application Data','Desktop','Local Settings','My Documents','Start Menu']
        data_paths['Default User'] = ['Application Data','Desktop','Local Settings','My Documents','Start Menu']
data_paths['Application Data'] = ['Google','Microsoft','Microsoft Office','Skype','uTorrent']
data_paths['Local Settings'] = ['Application','Apps','Temp']
data_paths['Inetpub'] = ['sites','wwwroot']
# Number of folders weighted distribution extracted from a 3K host database
folderNumWeightedDistribution = {'14': 0.035, '13': 0.07, '12': 0.125, '11': 0.485, '10': 1.025, '9': 72.205, '8': 66.805, '7': 30.35,
'6': 170.38, '5': 48.83, '4': 30.165, '3': 102.39, '2': 23.245, '1': 286.27}
folderNumWeightedRandomizer = WeightedRandomizer(folderNumWeightedDistribution)
file_name = ""
# Assign drive
file_name += (random.choice(data_drives)) + ":"
# Build path
current_subpath = ''
for i in xrange(1, folderNumWeightedRandomizer.random()):
if current_subpath in ['Documents and Settings', 'Users']:
current_subpath = fake.name().replace(' ','_')
elif current_subpath in data_paths:
current_subpath = (random.choice(data_paths[current_subpath]))
else:
current_subpath = os.path.splitext(fake.file_name(category=None, extension=None))[0]
file_name += "\\" + current_subpath
# Add file
file_name += "\\" + self.gen_filename()
return file_name
fake = Factory.create()
fake_ES = Factory.create('es_ES')
# Add new provider to faker instance
fake.add_provider(ACPProvider)
def strip_non_ascii(string):
    '''Return the string without non-ASCII characters or single quotes.'''
    stripped = (c for c in string if 0 < ord(c) < 127)
    return ''.join(stripped).replace("'", "")
def strip_accents(s):
return ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
def add_entry(DB, HostName, entry_fields):
# Insert host if it doesn't exist
Instances = []
InstancesCounter = 0
Recon = 0
ReconScoring = 0
DB.ExecuteMany("INSERT OR IGNORE INTO Hosts VALUES (NULL,?,?,?,?,?)",
[(HostName, str(repr(Instances)), InstancesCounter, Recon, ReconScoring)])
# Get HostID
HostID = DB.Query("SELECT HostID FROM Hosts WHERE HostName = '%s'" % HostName)[0][0]
# Add FilePath if not there yet
DB.Execute("INSERT OR IGNORE INTO FilePaths VALUES (NULL, '%s')" % entry_fields.FilePath)
# Get FilePathID
FilePathID = DB.QueryInt("SELECT FilePathID FROM FilePaths WHERE FilePath = '%s'" % entry_fields.FilePath)
insertList = []
insertList.append((HostID, entry_fields.EntryType, entry_fields.RowNumber, entry_fields.LastModified,
entry_fields.LastUpdate, FilePathID, entry_fields.FileName, entry_fields.Size,
entry_fields.ExecFlag, entry_fields.SHA1, entry_fields.FileDescription, entry_fields.FirstRun,
entry_fields.Created, entry_fields.Modified1, entry_fields.Modified2, entry_fields.LinkerTS,
entry_fields.Product, entry_fields.Company, entry_fields.PE_sizeofimage, entry_fields.Version_number,
entry_fields.Version, entry_fields.Language, entry_fields.Header_hash, entry_fields.PE_checksum,
entry_fields.SwitchBackContext, entry_fields.InstanceID))
numFields = 29 - 3
valuesQuery = "(NULL," + "?," * numFields + "0, 0)"
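    # numFields = 26: one "?" placeholder per field in insertList; the
    # leading NULL feeds the autoincrement id and the trailing "0, 0" fills
    # the last two Entries columns, 29 columns in total.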
DB.ExecuteMany("INSERT INTO Entries VALUES " + valuesQuery, insertList)
def build_fake_DB(hosts = 10, seed = random.randint(0,10000), database_file = None):
hostnames_set = set()
filePaths_dict = defaultdict(int)
filePaths_dict_ID = 0
filePaths_dict_ID_skip = 0
random.seed(seed)
fake.seed(seed)
fake_ES.seed(seed)
    if database_file is None:
# Get temp db name for the test
tempdb = tempfile.NamedTemporaryFile(suffix='.db', prefix='testCase', dir=tempfile.gettempdir())
tempdb.close()
database_file = tempdb.name
if os.path.isfile(database_file):
logger.warning("Adding hosts to existing database")
with appDB.DBClass(database_file, "False", settings.__version__) as DB:
conn = DB.appConnectDB()
# Load existing hosts
data = DB.Query("SELECT HostName FROM Hosts")
for hostName in data:
hostnames_set.add(hostName[0])
# Load existing paths
data = DB.Query("SELECT FilePathID, FilePath FROM FilePaths")
for filePathID, FilePath in data:
filePaths_dict[FilePath] = (filePathID)
filePaths_dict_ID += 1
filePaths_dict_ID_skip = filePaths_dict_ID
else:
with appDB.DBClass(database_file, "True", settings.__version__) as DB:
DB.appInitDB()
DB.appSetIndex()
conn = DB.appConnectDB()
DB.appRequireIndexesDB("index_EntriesHostName", "CREATE INDEX index_EntriesHostName on Hosts(HostName)")
DB.appRequireIndexesDB("index_FilePathsFilePath", "CREATE INDEX index_FilePathsFilePath on FilePaths(FilePath)")
with appDB.DBClass(database_file, "False", settings.__version__) as DB:
conn = DB.appConnectDB()
# Start creating hosts and data:
rowList = []
insertList = []
numFields = 29 - 3
valuesQuery = "(NULL," + "?," * numFields + "0, 0)"
progressCurrent = 0
progressTotal = hosts
for i in xrange(0,hosts):
progressCurrent += 1
update_progress(float(progressCurrent) / float(progressTotal))
HostName = ""
while True:
HostName = strip_accents((fake_ES.color_name() + fake_ES.country()).replace(' ', ''))
HostName = strip_non_ascii(HostName)
HostName += "_" + str(random.randint(000,999))
if HostName not in hostnames_set:
hostnames_set.add(HostName)
break
print "Creating appcompat/amcache data for host: %s" % HostName
Instances = ['dummy']
InstancesCounter = 1
Recon = 0
ReconScoring = 0
DB.ExecuteMany("INSERT INTO Hosts VALUES (NULL,?,?,?,?,?)", [(HostName, str(repr(Instances)), InstancesCounter, Recon, ReconScoring)])
HostID = DB.Query("SELECT HostID FROM Hosts WHERE HostName = '%s'" % HostName)[0][0]
# Sampled 2K hosts, this should statistically provide a somewhat realistic amount of entries (for AppCompat)
for i in xrange(1, random.randint(400,800)):
# EntryType = random.choice([settings.__APPCOMPAT__,settings.__AMCACHE__])
EntryType = settings.__APPCOMPAT__
RowNumber = 0
LastModified = str(fake.date_time_between('-1y')) + "." + str(random.randint(1,9999))
LastUpdate = str(fake.date_time_between('-4y')) + "." + str(random.randint(1,9999))
filePathID = 0
# todo: FilePath retains final backslash on root paths (c:\, d:\ ...) remove.
FilePath, FileName = ntpath.split(fake.path())
FilePath = FilePath.lower()
FileName = FileName.lower()
Size = random.randint(1,100000)
if EntryType == settings.__APPCOMPAT__:
ExecFlag = random.choice(['True','False'])
else: ExecFlag = 'True'
if EntryType == settings.__AMCACHE__:
SHA1 = fake.sha1()
FileDescription = random.choice(['', '', '', '', '', '', '', '', '', '', fake.text()])
FirstRun = str(fake.date_time_between('-1y')) + "." + str(random.randint(1,9999))
Created = str(fake.date_time_between('-5y')) + "." + str(random.randint(1,9999))
Modified1 = str(fake.date_time_between('-5y')) + "." + str(random.randint(1,9999))
Modified2 = str(fake.date_time_between('-5y')) + "." + str(random.randint(1,9999))
LinkerTS = str(fake.date_time_between('-10y'))
Company = fake.company()
PE_sizeofimage = random.randint(1,10000)
# Redo re-assignment of date we do on load for AmCache
LastUpdate = FirstRun
LastModified = Modified2
else:
SHA1 = ''
FileDescription = ''
FirstRun = ''
Created = ''
Modified1 = ''
Modified2 = ''
LinkerTS = ''
Company = ''
PE_sizeofimage = ''
Product = 0
Version_number = 0
Version = 0
Language = 0
Header_hash = 0
PE_checksum = 0
SwitchBackContext = 0
InstanceID = 0
# # Add FilePath if not there yet
# DB.Execute("INSERT OR IGNORE INTO FilePaths VALUES (NULL, '%s')" % FilePath)
# # Get FilePathID
# FilePathID = DB.QueryInt("SELECT FilePathID FROM FilePaths WHERE FilePath = '%s'" % FilePath)
if FilePath not in filePaths_dict:
filePaths_dict[FilePath] = (filePaths_dict_ID)
filePathID = filePaths_dict_ID
filePaths_dict_ID += 1
else: filePathID = filePaths_dict[FilePath]
insertList.append((HostID, EntryType, RowNumber, LastModified, LastUpdate, filePathID, FileName,
Size, ExecFlag, SHA1, FileDescription, FirstRun, Created, Modified1,
Modified2, LinkerTS, Product, Company, PE_sizeofimage, Version_number,
Version, Language, Header_hash, PE_checksum, SwitchBackContext, InstanceID))
# Flush the insert buffer to the DB once it exceeds one million rows:
if len(insertList) > 1000000:
logger.info("Dumping data to DB")
DB.ExecuteMany("INSERT INTO Entries VALUES " + valuesQuery, insertList)
insertList = []
# Insert last bucket
logger.info("Dumping last bucket to DB")
DB.ExecuteMany("INSERT INTO Entries VALUES " + valuesQuery, insertList)
# Insert new FilePaths
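# filePaths_dict_ID_skip (set before this fragment) counts the paths that were
# already persisted, so the slice below only inserts the newly seen ones.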
list_FilePath_ID = [(v, k) for k, v in filePaths_dict.items()]
list_FilePath_ID.sort(key=lambda tup: tup[0])
DB.ExecuteMany("INSERT INTO FilePaths VALUES (?,?)", list_FilePath_ID[filePaths_dict_ID_skip:])
return database_file
|
fairbird/OpenPLI-BlackHole
|
refs/heads/master
|
lib/python/Components/UsageConfig.py
|
1
|
from Components.Harddisk import harddiskmanager
from config import ConfigSubsection, ConfigYesNo, config, ConfigSelection, ConfigText, ConfigNumber, ConfigSet, ConfigLocations, ConfigSelectionNumber, ConfigClock, ConfigSlider, ConfigEnableDisable, ConfigSubDict, ConfigNothing, ConfigInteger, ConfigPassword, ConfigIP, ConfigDictionarySet
from Tools.Directories import defaultRecordingLocation
from enigma import setTunerTypePriorityOrder, setPreferredTuner, setSpinnerOnOff, setEnableTtCachingOnOff, eEnv, eDVBDB, Misc_Options, eBackgroundFileEraser, eServiceEvent
from Components.NimManager import nimmanager
from Components.ServiceList import refreshServiceList
from SystemInfo import SystemInfo
import os
import time
def InitUsageConfig():
config.usage = ConfigSubsection()
config.usage.subnetwork = ConfigYesNo(default = True)
config.usage.subnetwork_cable = ConfigYesNo(default = True)
config.usage.subnetwork_terrestrial = ConfigYesNo(default = True)
config.usage.showdish = ConfigYesNo(default = True)
config.misc.showrotorposition = ConfigSelection(default='no', choices=[('no', _('no')),
('yes', _('yes')),
('withtext', _('with text')),
('tunername', _('with tuner name'))])
config.usage.multibouquet = ConfigYesNo(default = True)
config.usage.alternative_number_mode = ConfigYesNo(default = True)
def alternativeNumberModeChange(configElement):
eDVBDB.getInstance().setNumberingMode(configElement.value)
refreshServiceList()
config.usage.alternative_number_mode.addNotifier(alternativeNumberModeChange)
config.usage.hide_number_markers = ConfigYesNo(default = True)
config.usage.hide_number_markers.addNotifier(refreshServiceList)
config.usage.servicetype_icon_mode = ConfigSelection(default='0', choices=[('0', _('None')), ('1', _('Left from servicename')), ('2', _('Right from servicename'))])
config.usage.servicetype_icon_mode.addNotifier(refreshServiceList)
config.usage.crypto_icon_mode = ConfigSelection(default='0', choices=[('0', _('None')), ('1', _('Left from servicename')), ('2', _('Right from servicename'))])
config.usage.crypto_icon_mode.addNotifier(refreshServiceList)
config.usage.record_indicator_mode = ConfigSelection(default='0', choices=[('0', _('None')),
('1', _('Left from servicename')),
('2', _('Right from servicename')),
('3', _('Red colored'))])
config.usage.record_indicator_mode.addNotifier(refreshServiceList)
choicelist = [('-1', _('Disable'))]
for i in range(0, 1300, 100):
choicelist.append((str(i), ngettext('%d pixel wide', '%d pixels wide', i) % i))
config.usage.servicelist_column = ConfigSelection(default='-1', choices=choicelist)
config.usage.servicelist_column.addNotifier(refreshServiceList)
config.usage.service_icon_enable = ConfigYesNo(default = False)
config.usage.service_icon_enable.addNotifier(refreshServiceList)
config.usage.servicelist_cursor_behavior = ConfigSelection(default='standard', choices=[('standard', _('Standard')),
('keep', _('Keep service')),
('reverseB', _('Reverse bouquet buttons')),
('keep reverseB', _('Keep service') + ' + ' + _('Reverse bouquet buttons'))])
choicelist = [('by skin', _('As defined by the skin'))]
for i in range(5, 41):
choicelist.append(str(i))
config.usage.servicelist_number_of_services = ConfigSelection(default='by skin', choices=choicelist)
config.usage.servicelist_number_of_services.addNotifier(refreshServiceList)
config.usage.multiepg_ask_bouquet = ConfigYesNo(default = False)
config.usage.quickzap_bouquet_change = ConfigYesNo(default = False)
config.usage.e1like_radio_mode = ConfigYesNo(default = True)
choicelist = [('0', _('No timeout'))]
for i in range(1, 20):
choicelist.append((str(i), ngettext('%d second', '%d seconds', i) % i))
config.usage.infobar_timeout = ConfigSelection(default='5', choices=choicelist)
config.usage.show_infobar_do_dimming = ConfigYesNo(default = True)
config.usage.show_infobar_dimming_speed = ConfigSelectionNumber(min = 1, max = 40, stepwidth = 1, default = 40, wraparound = True)
config.usage.show_infobar_on_zap = ConfigYesNo(default = True)
config.usage.show_infobar_on_skip = ConfigYesNo(default = True)
config.usage.show_infobar_on_event_change = ConfigYesNo(default = False)
config.usage.show_second_infobar = ConfigSelection(default='5', choices=[("", _('None'))] + choicelist + [('EPG', _('EPG'))])
config.usage.show_simple_second_infobar = ConfigYesNo(default = True)
config.usage.infobar_frontend_source = ConfigSelection(default='settings', choices=[('settings', _('Settings')), ('tuner', _('Tuner'))])
config.usage.oldstyle_zap_controls = ConfigYesNo(default = False)
config.usage.oldstyle_channel_select_controls = ConfigYesNo(default = False)
config.usage.zap_with_ch_buttons = ConfigYesNo(default = False)
config.usage.ok_is_channelselection = ConfigYesNo(default = False)
config.usage.volume_instead_of_channelselection = ConfigYesNo(default = False)
config.usage.channelselection_preview = ConfigYesNo(default = False)
config.usage.show_spinner = ConfigYesNo(default = True)
config.usage.menu_sort_weight = ConfigDictionarySet(default={'mainmenu': {'submenu': {}}})
config.usage.menu_sort_mode = ConfigSelection(default='default', choices=[('a_z', _('alphabetical')), ('default', _('Default')), ('user', _('user defined'))])
config.usage.menu_show_numbers = ConfigYesNo(default = False)
config.usage.menu_path = ConfigSelection(default='off', choices=[('off', _('Disabled')), ('small', _('Small')), ('large', _('Large'))])
config.usage.enable_tt_caching = ConfigYesNo(default = True)
choicelist = []
for i in (10, 30):
choicelist.append((str(i), ngettext('%d second', '%d seconds', i) % i))
for i in (60, 120, 300, 600, 1200, 1800):
m = i / 60
choicelist.append((str(i), ngettext('%d minute', '%d minutes', m) % m))
for i in (3600, 7200, 14400):
h = i / 3600
choicelist.append((str(i), ngettext('%d hour', '%d hours', h) % h))
config.usage.hdd_standby = ConfigSelection(default='300', choices=[('0', _('No standby'))] + choicelist)
config.usage.output_12V = ConfigSelection(default='do not change', choices=[('do not change', _('Do not change')), ('off', _('Off')), ('on', _('On'))])
config.usage.pip_zero_button = ConfigSelection(default='standard', choices=[('standard', _('Standard')),
('swap', _('Swap PiP and main picture')),
('swapstop', _('Move PiP to main picture')),
('stop', _('Stop PiP'))])
config.usage.pip_hideOnExit = ConfigSelection(default='without popup', choices=[('no', _('no')), ('popup', _('With popup')), ('without popup', _('Without popup'))])
choicelist = [('-1', _('Disabled')), ('0', _('No timeout'))]
for i in [60, 300, 600, 900, 1800, 2700, 3600]:
m = i / 60
choicelist.append((str(i), ngettext('%d minute', '%d minutes', m) % m))
config.usage.pip_last_service_timeout = ConfigSelection(default='0', choices=choicelist)
config.usage.default_path = ConfigText(default = '')
config.usage.timer_path = ConfigText(default='<default>')
config.usage.instantrec_path = ConfigText(default='<default>')
config.usage.timeshift_path = ConfigText(default='/media/hdd/')
config.usage.allowed_timeshift_paths = ConfigLocations(default=['/media/hdd/'])
config.ncaminfo = ConfigSubsection()
config.ncaminfo.showInExtensions = ConfigYesNo(default = False)
config.ncaminfo.userdatafromconf = ConfigYesNo(default = False)
config.ncaminfo.autoupdate = ConfigYesNo(default = False)
config.ncaminfo.username = ConfigText(default='username', fixed_size=False, visible_width=12)
config.ncaminfo.password = ConfigPassword(default='password', fixed_size=False)
config.ncaminfo.ip = ConfigIP(default=[127, 0, 0, 1], auto_jump=True)
config.ncaminfo.port = ConfigInteger(default=8181, limits=(0, 65536))
config.ncaminfo.intervall = ConfigSelectionNumber(min=1, max=600, stepwidth=1, default=10, wraparound=True)
config.oscaminfo = ConfigSubsection()
config.oscaminfo.showInExtensions = ConfigYesNo(default = False)
config.oscaminfo.userdatafromconf = ConfigYesNo(default = False)
config.oscaminfo.autoupdate = ConfigYesNo(default = False)
config.oscaminfo.username = ConfigText(default='username', fixed_size=False, visible_width=12)
config.oscaminfo.password = ConfigPassword(default='password', fixed_size=False)
config.oscaminfo.ip = ConfigIP(default=[127, 0, 0, 1], auto_jump=True)
config.oscaminfo.port = ConfigInteger(default=16002, limits=(0, 65536))
config.oscaminfo.intervall = ConfigSelectionNumber(min=1, max=600, stepwidth=1, default=10, wraparound=True)
SystemInfo['OScamInstalled'] = False
config.cccaminfo = ConfigSubsection()
config.cccaminfo.showInExtensions = ConfigYesNo(default = False)
config.cccaminfo.serverNameLength = ConfigSelectionNumber(min=10, max=100, stepwidth=1, default=22, wraparound=True)
config.cccaminfo.name = ConfigText(default='Profile', fixed_size=False)
config.cccaminfo.ip = ConfigText(default='192.168.2.12', fixed_size=False)
config.cccaminfo.username = ConfigText(default='', fixed_size=False)
config.cccaminfo.password = ConfigText(default='', fixed_size=False)
config.cccaminfo.port = ConfigInteger(default=16001, limits=(1, 65535))
config.cccaminfo.profile = ConfigText(default='', fixed_size=False)
config.cccaminfo.ecmInfoEnabled = ConfigYesNo(default = True)
config.cccaminfo.ecmInfoTime = ConfigSelectionNumber(min=1, max=10, stepwidth=1, default=5, wraparound=True)
config.cccaminfo.ecmInfoForceHide = ConfigYesNo(default = True)
config.cccaminfo.ecmInfoPositionX = ConfigInteger(default=50)
config.cccaminfo.ecmInfoPositionY = ConfigInteger(default=50)
config.cccaminfo.blacklist = ConfigText(default='/media/cf/CCcamInfo.blacklisted', fixed_size=False)
config.cccaminfo.profiles = ConfigText(default='/media/cf/CCcamInfo.profiles', fixed_size=False)
config.usage.movielist_trashcan = ConfigYesNo(default = True)
config.usage.movielist_trashcan_days = ConfigNumber(default=8)
config.usage.movielist_trashcan_reserve = ConfigNumber(default=40)
config.usage.on_movie_start = ConfigSelection(default='resume', choices=[('ask yes', _('Ask user') + ' ' + _('default') + ' ' + _('yes')),
('ask no', _('Ask user') + ' ' + _('default') + ' ' + _('no')),
('resume', _('Resume from last position')),
('beginning', _('Start from the beginning'))])
config.usage.on_movie_stop = ConfigSelection(default='movielist', choices=[('ask', _('Ask user')), ('movielist', _('Return to movie list')), ('quit', _('Return to previous service'))])
config.usage.on_movie_eof = ConfigSelection(default='movielist', choices=[('ask', _('Ask user')),
('movielist', _('Return to movie list')),
('quit', _('Return to previous service')),
('pause', _('Pause movie at end')),
('playlist', _('Play next (return to movie list)')),
('playlistquit', _('Play next (return to previous service)')),
('loop', _('Continues play (loop)')),
('repeatcurrent', _('Repeat'))])
config.usage.next_movie_msg = ConfigYesNo(default = True)
config.usage.last_movie_played = ConfigText()
config.usage.leave_movieplayer_onExit = ConfigSelection(default='popup', choices=[('no', _('no')),
('popup', _('With popup')),
('without popup', _('Without popup')),
('movielist', _('Return to movie list'))])
config.usage.setup_level = ConfigSelection(default='expert', choices=[('simple', _('Normal')), ('intermediate', _('Advanced')), ('expert', _('Expert'))])
config.usage.startup_to_standby = ConfigSelection(default='no', choices=[('no', _('no')), ('yes', _('yes')), ('except', _('No, except Wakeup timer'))])
config.usage.wakeup_enabled = ConfigSelection(default = "no", choices = [
("no", _("no")),
("yes", _("yes")),
("standby", _("Yes, only from standby")),
("deepstandby", _("Yes, only from deep standby")) ])
config.usage.wakeup_day = ConfigSubDict()
config.usage.wakeup_time = ConfigSubDict()
for i in range(7):
config.usage.wakeup_day[i] = ConfigEnableDisable(default = False)
config.usage.wakeup_time[i] = ConfigClock(default=21600)
choicelist = [('0', _('Do nothing'))]
for i in range(3600, 21601, 3600):
h = abs(i / 3600)
h = ngettext('%d hour', '%d hours', h) % h
choicelist.append((str(i), _('Standby in ') + h))
config.usage.inactivity_timer = ConfigSelection(default='0', choices=choicelist)
config.usage.inactivity_timer_blocktime = ConfigYesNo(default = True)
config.usage.inactivity_timer_blocktime_begin = ConfigClock(default=time.mktime((0, 0, 0, 18, 0, 0, 0, 0, 0)))
config.usage.inactivity_timer_blocktime_end = ConfigClock(default=time.mktime((0, 0, 0, 23, 0, 0, 0, 0, 0)))
config.usage.inactivity_timer_blocktime_extra = ConfigYesNo(default = False)
config.usage.inactivity_timer_blocktime_extra_begin = ConfigClock(default=time.mktime((0, 0, 0, 6, 0, 0, 0, 0, 0)))
config.usage.inactivity_timer_blocktime_extra_end = ConfigClock(default=time.mktime((0, 0, 0, 9, 0, 0, 0, 0, 0)))
choicelist = [('0', _('Disabled')), ('event_standby', _('Standby after current event'))]
for i in range(900, 7201, 900):
m = abs(i / 60)
m = ngettext('%d minute', '%d minutes', m) % m
choicelist.append((str(i), _('Standby in ') + m))
config.usage.sleep_timer = ConfigSelection(default='0', choices=choicelist)
choicelist = [('0', _('Disabled'))]
for i in [300, 600] + range(900, 7201, 900):
m = abs(i / 60)
m = ngettext('%d minute', '%d minutes', m) % m
choicelist.append((str(i), _('after ') + m))
config.usage.standby_to_shutdown_timer = ConfigSelection(default='0', choices=choicelist)
config.usage.standby_to_shutdown_timer_blocktime = ConfigYesNo(default = True)
config.usage.standby_to_shutdown_timer_blocktime_begin = ConfigClock(default=time.mktime((0, 0, 0, 6, 0, 0, 0, 0, 0)))
config.usage.standby_to_shutdown_timer_blocktime_end = ConfigClock(default=time.mktime((0, 0, 0, 23, 0, 0, 0, 0, 0)))
choicelist = [('0', _('Disabled'))]
for m in (1, 5, 10, 15, 30, 60):
choicelist.append((str(m * 60), ngettext('%d minute', '%d minutes', m) % m))
config.usage.screen_saver = ConfigSelection(default='300', choices=choicelist)
config.usage.check_timeshift = ConfigYesNo(default = True)
choicelist = [('0', _('Disabled'))]
for i in (2, 3, 4, 5, 10, 20, 30):
choicelist.append((str(i), ngettext('%d second', '%d seconds', i) % i))
for i in (60, 120, 300):
m = i / 60
choicelist.append((str(i), ngettext('%d minute', '%d minutes', m) % m))
config.usage.timeshift_start_delay = ConfigSelection(default='0', choices=choicelist)
config.usage.alternatives_priority = ConfigSelection(default='0', choices=[('0', 'DVB-S/-C/-T'),
('1', 'DVB-S/-T/-C'),
('2', 'DVB-C/-S/-T'),
('3', 'DVB-C/-T/-S'),
('4', 'DVB-T/-C/-S'),
('5', 'DVB-T/-S/-C'),
('127', _('No priority'))])
config.usage.remote_fallback_enabled = ConfigYesNo(default = False)
config.usage.remote_fallback = ConfigText(default='', fixed_size=False)
config.usage.show_timer_conflict_warning = ConfigYesNo(default = True)
dvbs_nims = [('-2', _('Disabled'))]
dvbt_nims = [('-2', _('Disabled'))]
dvbc_nims = [('-2', _('Disabled'))]
atsc_nims = [('-2', _('Disabled'))]
nims = [('-1', _('auto'))]
for x in nimmanager.nim_slots:
if x.isCompatible('DVB-S'):
dvbs_nims.append((str(x.slot), x.getSlotName()))
elif x.isCompatible('DVB-T'):
dvbt_nims.append((str(x.slot), x.getSlotName()))
elif x.isCompatible('DVB-C'):
dvbc_nims.append((str(x.slot), x.getSlotName()))
elif x.isCompatible('ATSC'):
atsc_nims.append((str(x.slot), x.getSlotName()))
nims.append((str(x.slot), x.getSlotName()))
config.usage.frontend_priority = ConfigSelection(default='-1', choices=list(nims))
nims.insert(0, ('-2', _('Disabled')))
config.usage.recording_frontend_priority = ConfigSelection(default='-2', choices=nims)
config.usage.frontend_priority_dvbs = ConfigSelection(default='-2', choices=list(dvbs_nims))
dvbs_nims.insert(1, ('-1', _('auto')))
config.usage.recording_frontend_priority_dvbs = ConfigSelection(default='-2', choices=dvbs_nims)
config.usage.frontend_priority_dvbt = ConfigSelection(default='-2', choices=list(dvbt_nims))
dvbt_nims.insert(1, ('-1', _('auto')))
config.usage.recording_frontend_priority_dvbt = ConfigSelection(default='-2', choices=dvbt_nims)
config.usage.frontend_priority_dvbc = ConfigSelection(default='-2', choices=list(dvbc_nims))
dvbc_nims.insert(1, ('-1', _('auto')))
config.usage.recording_frontend_priority_dvbc = ConfigSelection(default='-2', choices=dvbc_nims)
config.usage.frontend_priority_atsc = ConfigSelection(default = "-2", choices = list(atsc_nims))
atsc_nims.insert(1,('-1', _('auto')))
config.usage.recording_frontend_priority_atsc = ConfigSelection(default = '-2', choices = atsc_nims)
SystemInfo['DVB-S_priority_tuner_available'] = len(dvbs_nims) > 3 and any(len(i) > 2 for i in (dvbt_nims, dvbc_nims, atsc_nims))
SystemInfo['DVB-T_priority_tuner_available'] = len(dvbt_nims) > 3 and any(len(i) > 2 for i in (dvbs_nims, dvbc_nims, atsc_nims))
SystemInfo['DVB-C_priority_tuner_available'] = len(dvbc_nims) > 3 and any(len(i) > 2 for i in (dvbs_nims, dvbt_nims, atsc_nims))
SystemInfo['ATSC_priority_tuner_available'] = len(atsc_nims) > 3 and any(len(i) > 2 for i in (dvbs_nims, dvbc_nims, dvbt_nims))
config.misc.disable_background_scan = ConfigYesNo(default = False)
config.usage.show_event_progress_in_servicelist = ConfigSelection(default='barright', choices=[('barleft', _('Progress bar left')),
('barright', _('Progress bar right')),
('percleft', _('Percentage left')),
('percright', _('Percentage right')),
('no', _('no'))])
config.usage.show_channel_numbers_in_servicelist = ConfigYesNo(default = True)
config.usage.show_event_progress_in_servicelist.addNotifier(refreshServiceList)
config.usage.show_channel_numbers_in_servicelist.addNotifier(refreshServiceList)
config.usage.blinking_display_clock_during_recording = ConfigYesNo(default = False)
config.usage.show_message_when_recording_starts = ConfigYesNo(default = True)
config.usage.load_length_of_movies_in_moviellist = ConfigYesNo(default = True)
config.usage.show_icons_in_movielist = ConfigSelection(default='i', choices=[('o', _('Off')),
('p', _('Progress')),
('s', _('Small progress')),
('i', _('Icons'))])
config.usage.movielist_unseen = ConfigYesNo(default = False)
config.usage.swap_snr_on_osd = ConfigYesNo(default = False)
def SpinnerOnOffChanged(configElement):
setSpinnerOnOff(int(configElement.value))
config.usage.show_spinner.addNotifier(SpinnerOnOffChanged)
def EnableTtCachingChanged(configElement):
setEnableTtCachingOnOff(int(configElement.value))
config.usage.enable_tt_caching.addNotifier(EnableTtCachingChanged)
def TunerTypePriorityOrderChanged(configElement):
setTunerTypePriorityOrder(int(configElement.value))
config.usage.alternatives_priority.addNotifier(TunerTypePriorityOrderChanged, immediate_feedback=False)
def PreferredTunerChanged(configElement):
setPreferredTuner(int(configElement.value))
config.usage.frontend_priority.addNotifier(PreferredTunerChanged)
config.usage.hide_zap_errors = ConfigYesNo(default = False)
config.usage.hide_ci_messages = ConfigYesNo(default = True)
config.usage.show_cryptoinfo = ConfigYesNo(default = True)
config.usage.show_eit_nownext = ConfigYesNo(default = True)
config.usage.show_vcr_scart = ConfigYesNo(default = False)
config.usage.show_update_disclaimer = ConfigYesNo(default = True)
config.usage.pic_resolution = ConfigSelection(default=None, choices=[(None, _('Same resolution as skin')),
('(720, 576)', '720x576'),
('(1280, 720)', '1280x720'),
('(1920, 1080)', '1920x1080')][:SystemInfo['HasFullHDSkinSupport'] and 4 or 3])
if SystemInfo['Fan']:
choicelist = [('off', _('Off')), ('on', _('On')), ('auto', _('Auto'))]
if os.path.exists('/proc/stb/fp/fan_choices'):
choicelist = [ x for x in choicelist if x[0] in open('/proc/stb/fp/fan_choices', 'r').read().strip().split(' ') ]
config.usage.fan = ConfigSelection(choicelist)
def fanChanged(configElement):
open(SystemInfo['Fan'], 'w').write(configElement.value)
config.usage.fan.addNotifier(fanChanged)
if SystemInfo['FanPWM']:
def fanSpeedChanged(configElement):
open(SystemInfo['FanPWM'], 'w').write(hex(configElement.value)[2:])
config.usage.fanspeed = ConfigSlider(default=127, increment=8, limits=(0, 255))
config.usage.fanspeed.addNotifier(fanSpeedChanged)
if SystemInfo['StandbyLED']:
def standbyLEDChanged(configElement):
open(SystemInfo['StandbyLED'], 'w').write(configElement.value and 'on' or 'off')
config.usage.standbyLED = ConfigYesNo(default = True)
config.usage.standbyLED.addNotifier(standbyLEDChanged)
if SystemInfo["PowerOffDisplay"]:
def powerOffDisplayChanged(configElement):
open(SystemInfo["PowerOffDisplay"], "w").write(configElement.value and "1" or "0")
config.usage.powerOffDisplay = ConfigYesNo(default = True)
config.usage.powerOffDisplay.addNotifier(powerOffDisplayChanged)
if SystemInfo['WakeOnLAN']:
def wakeOnLANChanged(configElement):
if 'fp' in SystemInfo['WakeOnLAN']:
open(SystemInfo['WakeOnLAN'], 'w').write(configElement.value and 'enable' or 'disable')
else:
open(SystemInfo['WakeOnLAN'], 'w').write(configElement.value and 'on' or 'off')
config.usage.wakeOnLAN = ConfigYesNo(default = False)
config.usage.wakeOnLAN.addNotifier(wakeOnLANChanged)
if SystemInfo["hasXcoreVFD"]:
def set12to8characterVFD(configElement):
open(SystemInfo["hasXcoreVFD"], "w").write(not configElement.value and "1" or "0")
config.usage.toggle12to8characterVFD = ConfigYesNo(default = False)
config.usage.toggle12to8characterVFD.addNotifier(set12to8characterVFD)
if SystemInfo["LcdLiveTVMode"]:
def setLcdLiveTVMode(configElement):
open(SystemInfo["LcdLiveTVMode"], "w").write(configElement.value)
config.usage.LcdLiveTVMode = ConfigSelection(default = "0", choices=[str(x) for x in range(0,9)])
config.usage.LcdLiveTVMode.addNotifier(setLcdLiveTVMode)
config.epg = ConfigSubsection()
config.epg.eit = ConfigYesNo(default = True)
config.epg.mhw = ConfigYesNo(default = False)
config.epg.freesat = ConfigYesNo(default = True)
config.epg.viasat = ConfigYesNo(default = True)
config.epg.netmed = ConfigYesNo(default = True)
config.epg.virgin = ConfigYesNo(default = False)
config.misc.showradiopic = ConfigYesNo(default = True)
def EpgSettingsChanged(configElement):
from enigma import eEPGCache
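# Start from an all-sources bitmask (0xFFFFFFFFL) and clear the bit(s) of every
# EPG source the user has disabled.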
mask = 4294967295L
if not config.epg.eit.value:
mask &= ~(eEPGCache.NOWNEXT | eEPGCache.SCHEDULE | eEPGCache.SCHEDULE_OTHER)
if not config.epg.mhw.value:
mask &= ~eEPGCache.MHW
if not config.epg.freesat.value:
mask &= ~(eEPGCache.FREESAT_NOWNEXT | eEPGCache.FREESAT_SCHEDULE | eEPGCache.FREESAT_SCHEDULE_OTHER)
if not config.epg.viasat.value:
mask &= ~eEPGCache.VIASAT
if not config.epg.netmed.value:
mask &= ~(eEPGCache.NETMED_SCHEDULE | eEPGCache.NETMED_SCHEDULE_OTHER)
if not config.epg.virgin.value:
mask &= ~(eEPGCache.VIRGIN_NOWNEXT | eEPGCache.VIRGIN_SCHEDULE)
eEPGCache.getInstance().setEpgSources(mask)
config.epg.eit.addNotifier(EpgSettingsChanged)
config.epg.mhw.addNotifier(EpgSettingsChanged)
config.epg.freesat.addNotifier(EpgSettingsChanged)
config.epg.viasat.addNotifier(EpgSettingsChanged)
config.epg.netmed.addNotifier(EpgSettingsChanged)
config.epg.virgin.addNotifier(EpgSettingsChanged)
config.epg.histminutes = ConfigSelectionNumber(min=0, max=120, stepwidth=15, default=0, wraparound=True)
def EpgHistorySecondsChanged(configElement):
from enigma import eEPGCache
eEPGCache.getInstance().setEpgHistorySeconds(config.epg.histminutes.getValue() * 60)
config.epg.histminutes.addNotifier(EpgHistorySecondsChanged)
def setHDDStandby(configElement):
for hdd in harddiskmanager.HDDList():
hdd[1].setIdleTime(int(configElement.value))
config.usage.hdd_standby.addNotifier(setHDDStandby, immediate_feedback=False)
if SystemInfo['12V_Output']:
def set12VOutput(configElement):
Misc_Options.getInstance().set_12V_output(configElement.value == 'on' and 1 or 0)
config.usage.output_12V.addNotifier(set12VOutput, immediate_feedback=False)
config.usage.keymap = ConfigText(default=eEnv.resolve('${datadir}/enigma2/keymap.xml'))
config.usage.keytrans = ConfigText(default=eEnv.resolve('${datadir}/enigma2/keytranslation.xml'))
config.seek = ConfigSubsection()
config.seek.selfdefined_13 = ConfigNumber(default=15)
config.seek.selfdefined_46 = ConfigNumber(default=60)
config.seek.selfdefined_79 = ConfigNumber(default=300)
config.seek.speeds_forward = ConfigSet(default=[2, 4, 8, 16, 32, 64, 128], choices=[2, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128])
config.seek.speeds_backward = ConfigSet(default=[2, 4, 8, 16, 32, 64, 128], choices=[1, 2, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128])
config.seek.speeds_slowmotion = ConfigSet(default=[2, 4, 8], choices=[2, 4, 6, 8, 12, 16, 25])
config.seek.enter_forward = ConfigSelection(default='2', choices=['2', '4', '6', '8', '12', '16', '24', '32', '48', '64', '96', '128'])
config.seek.enter_backward = ConfigSelection(default='1', choices=['1', '2', '4', '6', '8', '12', '16', '24', '32', '48', '64', '96', '128'])
config.seek.on_pause = ConfigSelection(default='play', choices=[('play', _('Play')), ('step', _('Single step (GOP)')), ('last', _('Last speed'))])
config.usage.timerlist_finished_timer_position = ConfigSelection(default='end', choices=[('beginning', _('At beginning')), ('end', _('At end'))])
def updateEnterForward(configElement):
if not configElement.value:
configElement.value = [2]
updateChoices(config.seek.enter_forward, configElement.value)
config.seek.speeds_forward.addNotifier(updateEnterForward, immediate_feedback=False)
def updateEnterBackward(configElement):
if not configElement.value:
configElement.value = [2]
updateChoices(config.seek.enter_backward, configElement.value)
config.seek.speeds_backward.addNotifier(updateEnterBackward, immediate_feedback=False)
def updateEraseSpeed(el):
eBackgroundFileEraser.getInstance().setEraseSpeed(int(el.value))
def updateEraseFlags(el):
eBackgroundFileEraser.getInstance().setEraseFlags(int(el.value))
config.misc.erase_speed = ConfigSelection(default="20", choices=[("10", _("10 MB/s")),
("20", _("20 MB/s")),
("50", _("50 MB/s")),
("100", _("100 MB/s"))])
config.misc.erase_speed.addNotifier(updateEraseSpeed, immediate_feedback=False)
config.misc.erase_flags = ConfigSelection(default='1', choices=[('0', _('Disable')), ('1', _('Internal hdd only')), ('3', _('Everywhere'))])
config.misc.erase_flags.addNotifier(updateEraseFlags, immediate_feedback=False)
if SystemInfo['ZapMode']:
def setZapmode(el):
open(SystemInfo['ZapMode'], 'w').write(el.value)
config.misc.zapmode = ConfigSelection(default='mute', choices=[('mute', _('Black screen')),
('hold', _('Hold screen')),
('mutetilllock', _('Black screen till locked')),
('holdtilllock', _('Hold till locked'))])
config.misc.zapmode.addNotifier(setZapmode, immediate_feedback=False)
config.usage.historymode = ConfigSelection(default='1', choices=[('0', _('Just zap')), ('1', _('Show menu'))])
if SystemInfo['VFD_scroll_repeats']:
def scroll_repeats(el):
open(SystemInfo['VFD_scroll_repeats'], 'w').write(el.value)
choicelist = []
for i in range(1, 11, 1):
choicelist.append(str(i))
config.usage.vfd_scroll_repeats = ConfigSelection(default='3', choices=choicelist)
config.usage.vfd_scroll_repeats.addNotifier(scroll_repeats, immediate_feedback=False)
if SystemInfo['VFD_scroll_delay']:
def scroll_delay(el):
open(SystemInfo['VFD_scroll_delay'], 'w').write(el.value)
choicelist = []
for i in range(0, 1001, 50):
choicelist.append(str(i))
config.usage.vfd_scroll_delay = ConfigSelection(default='150', choices=choicelist)
config.usage.vfd_scroll_delay.addNotifier(scroll_delay, immediate_feedback=False)
if SystemInfo['VFD_initial_scroll_delay']:
def initial_scroll_delay(el):
open(SystemInfo['VFD_initial_scroll_delay'], 'w').write(el.value)
choicelist = []
for i in range(0, 20001, 500):
choicelist.append(str(i))
config.usage.vfd_initial_scroll_delay = ConfigSelection(default='1000', choices=choicelist)
config.usage.vfd_initial_scroll_delay.addNotifier(initial_scroll_delay, immediate_feedback=False)
if SystemInfo['VFD_final_scroll_delay']:
def final_scroll_delay(el):
open(SystemInfo['VFD_final_scroll_delay'], 'w').write(el.value)
choicelist = []
for i in range(0, 20001, 500):
choicelist.append(str(i))
config.usage.vfd_final_scroll_delay = ConfigSelection(default='1000', choices=choicelist)
config.usage.vfd_final_scroll_delay.addNotifier(final_scroll_delay, immediate_feedback=False)
if SystemInfo['HasForceLNBOn']:
def forceLNBPowerChanged(configElement):
open(SystemInfo['HasForceLNBOn'], 'w').write(configElement.value)
config.misc.forceLnbPower = ConfigSelection(default='on', choices=[('on', _('yes')), ('off', _('no'))])
config.misc.forceLnbPower.addNotifier(forceLNBPowerChanged)
if SystemInfo['HasForceToneburst']:
def forceToneBurstChanged(configElement):
open(SystemInfo['HasForceToneburst'], 'w').write(configElement.value)
config.misc.forceToneBurst = ConfigSelection(default='enable', choices=[('enable', _('yes')), ('disable', _('no'))])
config.misc.forceToneBurst.addNotifier(forceToneBurstChanged)
if SystemInfo['HasBypassEdidChecking']:
def setHasBypassEdidChecking(configElement):
open(SystemInfo['HasBypassEdidChecking'], 'w').write(configElement.value)
config.av.bypassEdidChecking = ConfigSelection(default='00000000', choices=[('00000001', _('yes')), ('00000000', _('no'))])
config.av.bypassEdidChecking.addNotifier(setHasBypassEdidChecking)
if SystemInfo['HasColorspace']:
def setHaveColorspace(configElement):
open(SystemInfo['HasColorspace'], 'w').write(configElement.value)
if SystemInfo['HasColorspaceSimple']:
config.av.hdmicolorspace = ConfigSelection(default='Edid(Auto)', choices={'Edid(Auto)': _('Auto'),
'Hdmi_Rgb': _('RGB')})
else:
config.av.hdmicolorspace = ConfigSelection(default='auto', choices={'auto': _('auto'),
'rgb': _('rgb'),
'420': _('420'),
'422': _('422'),
'444': _('444')})
config.av.hdmicolorspace.addNotifier(setHaveColorspace)
if SystemInfo["HasColordepth"]:
def setHaveColordepth(configElement):
open(SystemInfo["HasColordepth"], "w").write(configElement.value)
config.av.hdmicolordepth = ConfigSelection(default = "auto", choices={"auto": _("Auto"), "8bit": _("8bit"), "10bit": _("10bit"), "12bit": _("12bit")})
config.av.hdmicolordepth.addNotifier(setHaveColordepth)
if SystemInfo["HasHDMIpreemphasis"]:
def setHDMIpreemphasis(configElement):
open(SystemInfo["HasHDMIpreemphasis"], "w").write(configElement.value)
config.av.hdmipreemphasis = ConfigSelection(default = "off", choices = [ ("on", _("yes")), ("off", _("no"))] )
config.av.hdmipreemphasis.addNotifier(setHDMIpreemphasis)
if SystemInfo["HasColorimetry"]:
def setColorimetry(configElement):
open(SystemInfo["HasColorimetry"], "w").write(configElement.value)
config.av.hdmicolorimetry = ConfigSelection(default = "auto", choices = [("auto", _("Auto")), ("bt2020ncl", _("BT 2020 NCL")), ("bt2020cl", _("BT 2020 CL")), ("bt709", _("BT 709"))])
config.av.hdmicolorimetry.addNotifier(setColorimetry)
config.subtitles = ConfigSubsection()
config.subtitles.ttx_subtitle_colors = ConfigSelection(default='1', choices=[('0', _('original')), ('1', _('white')), ('2', _('yellow'))])
config.subtitles.ttx_subtitle_original_position = ConfigYesNo(default = False)
config.subtitles.subtitle_position = ConfigSelection(choices=['0', '10', '20', '30', '40', '50', '60', '70', '80', '90', '100', '150', '200', '250', '300', '350', '400', '450'], default='50')
config.subtitles.subtitle_alignment = ConfigSelection(choices=[('left', _('left')), ('center', _('center')), ('right', _('right'))], default='center')
config.subtitles.subtitle_rewrap = ConfigYesNo(default = False)
config.subtitles.colourise_dialogs = ConfigYesNo(default = False)
config.subtitles.subtitle_borderwidth = ConfigSelection(choices=['1', '2', '3', '4', '5'], default='3')
config.subtitles.subtitle_fontsize = ConfigSelection(choices=[ '%d' % x for x in range(16, 101) if not x % 2 ], default='40')
config.subtitles.showbackground = ConfigYesNo(default = False)
subtitle_delay_choicelist = []
for i in range(-900000, 1845000, 45000):
if i == 0:
subtitle_delay_choicelist.append(('0', _('No delay')))
else:
subtitle_delay_choicelist.append((str(i), _("%2.1f sec") % (i / 90000.)))
config.subtitles.subtitle_noPTSrecordingdelay = ConfigSelection(default='315000', choices=subtitle_delay_choicelist)
config.subtitles.dvb_subtitles_yellow = ConfigYesNo(default = False)
config.subtitles.dvb_subtitles_original_position = ConfigSelection(default='0', choices=[('0', _('Original')), ('1', _('Fixed')), ('2', _('Relative'))])
config.subtitles.dvb_subtitles_centered = ConfigYesNo(default = False)
config.subtitles.subtitle_bad_timing_delay = ConfigSelection(default='0', choices=subtitle_delay_choicelist)
config.subtitles.dvb_subtitles_backtrans = ConfigSelection(default='0', choices=[('0', _('No transparency')),
('25', '10%'),
('50', '20%'),
('75', '30%'),
('100', '40%'),
('125', '50%'),
('150', '60%'),
('175', '70%'),
('200', '80%'),
('225', '90%'),
('255', _('Full transparency'))])
config.subtitles.pango_subtitle_colors = ConfigSelection(default='1', choices=[('0', _('alternative')), ('1', _('white')), ('2', _('yellow'))])
config.subtitles.pango_subtitle_fontswitch = ConfigYesNo(default = True)
config.subtitles.pango_subtitles_delay = ConfigSelection(default='0', choices=subtitle_delay_choicelist)
config.subtitles.pango_subtitles_fps = ConfigSelection(default='1', choices=[('1', _('Original')),
('23976', _('23.976')),
('24000', _('24')),
('25000', _('25')),
('29970', _('29.97')),
('30000', _('30'))])
config.subtitles.pango_autoturnon = ConfigYesNo(default = True)
config.autolanguage = ConfigSubsection()
audio_language_choices = [('---', _('None')),
('orj dos ory org esl qaa und mis mul ORY ORJ Audio_ORJ', _('Original')),
('ara', _('Arabic')),
('eus baq', _('Basque')),
('bul', _('Bulgarian')),
('hrv', _('Croatian')),
('ces cze', _('Czech')),
('dan', _('Danish')),
('dut ndl', _('Dutch')),
('eng qaa', _('English')),
('est', _('Estonian')),
('fin', _('Finnish')),
('fra fre', _('French')),
('deu ger', _('German')),
('ell gre', _('Greek')),
('heb', _('Hebrew')),
('hun', _('Hungarian')),
('ind', _('Indonesian')),
('ita', _('Italian')),
('lav', _('Latvian')),
('lit', _('Lithuanian')),
('ltz', _('Luxembourgish')),
('nor', _('Norwegian')),
('pol', _('Polish')),
('por dub DUB', _('Portuguese')),
('fas per', _('Persian')),
('ron rum', _('Romanian')),
('rus', _('Russian')),
('srp', _('Serbian')),
('slk slo', _('Slovak')),
('slv', _('Slovenian')),
('spa', _('Spanish')),
('swe', _('Swedish')),
('tha', _('Thai')),
('tur Audio_TUR', _('Turkish')),
('ukr Ukr', _('Ukrainian'))]
def setEpgLanguage(configElement):
eServiceEvent.setEPGLanguage(configElement.value)
config.autolanguage.audio_epglanguage = ConfigSelection(audio_language_choices[:1] + audio_language_choices[2:], default='---')
config.autolanguage.audio_epglanguage.addNotifier(setEpgLanguage)
def setEpgLanguageAlternative(configElement):
eServiceEvent.setEPGLanguageAlternative(configElement.value)
config.autolanguage.audio_epglanguage_alternative = ConfigSelection(audio_language_choices[:1] + audio_language_choices[2:], default='---')
config.autolanguage.audio_epglanguage_alternative.addNotifier(setEpgLanguageAlternative)
config.autolanguage.audio_autoselect1 = ConfigSelection(choices=audio_language_choices, default='---')
config.autolanguage.audio_autoselect2 = ConfigSelection(choices=audio_language_choices, default='---')
config.autolanguage.audio_autoselect3 = ConfigSelection(choices=audio_language_choices, default='---')
config.autolanguage.audio_autoselect4 = ConfigSelection(choices=audio_language_choices, default='---')
config.autolanguage.audio_defaultac3 = ConfigYesNo(default = False)
config.autolanguage.audio_defaultddp = ConfigYesNo(default = False)
config.autolanguage.audio_usecache = ConfigYesNo(default = True)
subtitle_language_choices = audio_language_choices[:1] + audio_language_choices[2:]
config.autolanguage.subtitle_autoselect1 = ConfigSelection(choices=subtitle_language_choices, default='---')
config.autolanguage.subtitle_autoselect2 = ConfigSelection(choices=subtitle_language_choices, default='---')
config.autolanguage.subtitle_autoselect3 = ConfigSelection(choices=subtitle_language_choices, default='---')
config.autolanguage.subtitle_autoselect4 = ConfigSelection(choices=subtitle_language_choices, default='---')
config.autolanguage.subtitle_hearingimpaired = ConfigYesNo(default = False)
config.autolanguage.subtitle_defaultimpaired = ConfigYesNo(default = False)
config.autolanguage.subtitle_defaultdvb = ConfigYesNo(default = False)
config.autolanguage.subtitle_usecache = ConfigYesNo(default = True)
config.autolanguage.equal_languages = ConfigSelection(default='15', choices=[('0', _('None')),
('1', '1'),
('2', '2'),
('3', '1,2'),
('4', '3'),
('5', '1,3'),
('6', '2,3'),
('7', '1,2,3'),
('8', '4'),
('9', '1,4'),
('10', '2,4'),
('11', '1,2,4'),
('12', '3,4'),
('13', '1,3,4'),
('14', '2,3,4'),
('15', _('All'))])
config.streaming = ConfigSubsection()
config.streaming.stream_ecm = ConfigYesNo(default = False)
config.streaming.descramble = ConfigYesNo(default = True)
config.streaming.descramble_client = ConfigYesNo(default = False)
config.streaming.stream_eit = ConfigYesNo(default = True)
config.streaming.stream_ait = ConfigYesNo(default = True)
config.streaming.authentication = ConfigYesNo(default = False)
config.mediaplayer = ConfigSubsection()
config.mediaplayer.useAlternateUserAgent = ConfigYesNo(default = False)
config.mediaplayer.alternateUserAgent = ConfigText(default='')
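# Keeps a seek-speed ConfigSelection valid after its choice list changes: if the
# old value is no longer available, fall back to the largest remaining choice
# below it.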
def updateChoices(sel, choices):
if choices:
defval = None
val = int(sel.value)
if val not in choices:
tmp = choices[:]
tmp.reverse()
for x in tmp:
if x < val:
defval = str(x)
break
sel.setChoices(map(str, choices), defval)
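# Resolve the '<default>'/'<current>'/'<timer>' placeholders used by the timer
# and instant-record path settings; None means "use the default location".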
def preferredPath(path):
if config.usage.setup_level.index < 2 or path == '<default>' or not path:
return None
elif path == '<current>':
return config.movielist.last_videodir.value
elif path == '<timer>':
return config.movielist.last_timer_videodir.value
else:
return path
def preferredTimerPath():
return preferredPath(config.usage.timer_path.value)
def preferredInstantRecordPath():
return preferredPath(config.usage.instantrec_path.value)
def defaultMoviePath():
return defaultRecordingLocation(config.usage.default_path.value)
|
yaroslavvb/tensorflow
|
refs/heads/master
|
tensorflow/python/framework/device_test.py
|
106
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import device
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class DeviceTest(test_util.TensorFlowTestCase):
def testEmpty(self):
d = device.DeviceSpec()
self.assertEquals("", d.to_string())
d.parse_from_string("")
self.assertEquals("", d.to_string())
def testConstructor(self):
d = device.DeviceSpec(job="j", replica=0, task=1,
device_type="CPU", device_index=2)
self.assertEqual("j", d.job)
self.assertEqual(0, d.replica)
self.assertEqual(1, d.task)
self.assertEqual("CPU", d.device_type)
self.assertEqual(2, d.device_index)
self.assertEqual("/job:j/replica:0/task:1/device:CPU:2", d.to_string())
d = device.DeviceSpec(device_type="GPU", device_index=0)
self.assertEquals("/device:GPU:0", d.to_string())
def testto_string(self):
d = device.DeviceSpec()
d.job = "foo"
self.assertEquals("/job:foo", d.to_string())
d.task = 3
self.assertEquals("/job:foo/task:3", d.to_string())
d.device_type = "CPU"
d.device_index = 0
self.assertEquals("/job:foo/task:3/device:CPU:0", d.to_string())
d.task = None
d.replica = 12
self.assertEquals("/job:foo/replica:12/device:CPU:0", d.to_string())
d.device_type = "GPU"
d.device_index = 2
self.assertEquals("/job:foo/replica:12/device:GPU:2", d.to_string())
d.device_type = "CPU"
d.device_index = 1
self.assertEquals("/job:foo/replica:12/device:CPU:1", d.to_string())
d.device_type = None
d.device_index = None
d.cpu = None
self.assertEquals("/job:foo/replica:12", d.to_string())
# Test wildcard
d = device.DeviceSpec(job="foo", replica=12, task=3, device_type="GPU")
self.assertEquals("/job:foo/replica:12/task:3/device:GPU:*", d.to_string())
def testParse(self):
d = device.DeviceSpec()
d.parse_from_string("/job:foo/replica:0")
self.assertEquals("/job:foo/replica:0", d.to_string())
d.parse_from_string("/replica:1/task:0/cpu:0")
self.assertEquals("/replica:1/task:0/device:CPU:0", d.to_string())
d.parse_from_string("/replica:1/task:0/device:CPU:0")
self.assertEquals("/replica:1/task:0/device:CPU:0", d.to_string())
d.parse_from_string("/job:muu/gpu:2")
self.assertEquals("/job:muu/device:GPU:2", d.to_string())
with self.assertRaises(Exception) as e:
d.parse_from_string("/job:muu/gpu:2/cpu:0")
self.assertTrue("Cannot specify multiple device" in str(e.exception))
def testFromString(self):
d = device.DeviceSpec.from_string("/job:foo/replica:0")
self.assertEquals("/job:foo/replica:0", d.to_string())
with self.assertRaises(Exception) as e:
d = device.DeviceSpec.from_string("/job:muu/gpu:2/cpu:0")
self.assertTrue("Cannot specify multiple device" in str(e.exception))
d = device.DeviceSpec.from_string("/job:foo/replica:0/task:3/cpu:*")
self.assertEquals(None, d.device_index)
d = device.DeviceSpec.from_string("/job:foo/replica:0/task:3/gpu:7")
self.assertEquals(7, d.device_index)
d = device.DeviceSpec.from_string("/job:foo/replica:0/task:3/device:GPU:7")
self.assertEquals(7, d.device_index)
def testMerge(self):
d = device.DeviceSpec.from_string("/job:foo/replica:0")
self.assertEquals("/job:foo/replica:0", d.to_string())
d.merge_from(device.DeviceSpec.from_string("/task:1/gpu:2"))
self.assertEquals("/job:foo/replica:0/task:1/device:GPU:2", d.to_string())
d = device.DeviceSpec()
d.merge_from(device.DeviceSpec.from_string("/task:1/cpu:0"))
self.assertEquals("/task:1/device:CPU:0", d.to_string())
d.merge_from(device.DeviceSpec.from_string("/job:boo/gpu:0"))
self.assertEquals("/job:boo/task:1/device:GPU:0", d.to_string())
d.merge_from(device.DeviceSpec.from_string("/job:muu/cpu:2"))
self.assertEquals("/job:muu/task:1/device:CPU:2", d.to_string())
d.merge_from(device.DeviceSpec.from_string(
"/job:muu/device:MyFunnyDevice:2"))
self.assertEquals("/job:muu/task:1/device:MyFunnyDevice:2", d.to_string())
def testCanonicalName(self):
self.assertEqual("/job:foo/replica:0",
device.canonical_name("/job:foo/replica:0"))
self.assertEqual("/job:foo/replica:0",
device.canonical_name("/replica:0/job:foo"))
self.assertEqual("/job:foo/replica:0/task:0",
device.canonical_name("/job:foo/replica:0/task:0"))
self.assertEqual("/job:foo/replica:0/task:0",
device.canonical_name("/job:foo/task:0/replica:0"))
self.assertEqual("/device:CPU:0",
device.canonical_name("/device:CPU:0"))
self.assertEqual("/device:GPU:2",
device.canonical_name("/device:GPU:2"))
self.assertEqual("/job:foo/replica:0/task:0/device:GPU:0",
device.canonical_name(
"/job:foo/replica:0/task:0/gpu:0"))
self.assertEqual("/job:foo/replica:0/task:0/device:GPU:0",
device.canonical_name(
"/gpu:0/task:0/replica:0/job:foo"))
def testCheckValid(self):
device.check_valid("/job:foo/replica:0")
with self.assertRaises(Exception) as e:
device.check_valid("/job:j/replica:foo")
self.assertTrue("invalid literal for int" in str(e.exception))
with self.assertRaises(Exception) as e:
device.check_valid("/job:j/task:bar")
self.assertTrue("invalid literal for int" in str(e.exception))
with self.assertRaises(Exception) as e:
device.check_valid("/bar:muu/baz:2")
self.assertTrue("Unknown attribute: 'bar'" in str(e.exception))
with self.assertRaises(Exception) as e:
device.check_valid("/cpu:0/gpu:2")
self.assertTrue("Cannot specify multiple device" in str(e.exception))
if __name__ == "__main__":
googletest.main()
|
350dotorg/Django
|
refs/heads/master
|
django/conf/locale/hi/formats.py
|
81
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'g:i:s A'
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
# NUMBER_GROUPING =
|
whs/django
|
refs/heads/master
|
tests/handlers/views.py
|
30
|
from django.core.exceptions import SuspiciousOperation
from django.db import connection, transaction
from django.http import HttpResponse, StreamingHttpResponse
from django.views.decorators.csrf import csrf_exempt
try:
from http import HTTPStatus
except ImportError: # Python < 3.5
pass
def regular(request):
return HttpResponse(b"regular content")
def streaming(request):
return StreamingHttpResponse([b"streaming", b" ", b"content"])
def in_transaction(request):
return HttpResponse(str(connection.in_atomic_block))
@transaction.non_atomic_requests
def not_in_transaction(request):
return HttpResponse(str(connection.in_atomic_block))
def suspicious(request):
raise SuspiciousOperation('dubious')
@csrf_exempt
def malformed_post(request):
request.POST
return HttpResponse()
def httpstatus_enum(request):
return HttpResponse(status=HTTPStatus.OK)
|
pswaminathan/python_efficiency_tweaks
|
refs/heads/master
|
plots/plot_string_subst.py
|
1
|
# Plot the performance of the string_subst_*.py scripts:
# mean values, with variances as error bars
import matplotlib.pyplot as plt
x = [1, 2, 3]
y_1 = [0.0676,0.671266666666667,6.5625]
y_2 = [0.659833333333333,6.68196666666667,74.8843333333333]
y_3 = [0.0668333333333333,0.687233333333333,6.58736666666667]
y_1_err = [0.000000180000000000002,0.00000532333333333352,0.00000408999999999994]
y_2_err = [0.00000758333333333342,0.00673880333333339,0.291411603333335]
y_3_err = [0.000000943333333333331,0.00109408333333333,0.00257057333333333]
x_labels = ["n = 10^6", "n = 10^7", "n = 10^8"]
plt.figure()
plt.errorbar(x, y_1, yerr=y_1_err, fmt='-x')
plt.errorbar(x, y_2, yerr=y_2_err, fmt='-^')
plt.errorbar(x, y_3, yerr=y_3_err, fmt='-o')
plt.xticks(x, x_labels)
plt.xlim([0,4])
plt.xlabel('size n')
plt.ylabel('cpu time in sec')
plt.yscale('log')
plt.title('String substitution')
plt.legend(['string_subst_1.py', 'string_subst_2.py', 'string_subst_3.py'], loc='upper left')
#plt.show()
plt.savefig('PNGs/string_subst.png')
|
jean/sentry
|
refs/heads/master
|
tests/sentry/deletions/test_apiapplication.py
|
4
|
from __future__ import absolute_import
from sentry.models import ApiApplication, ApiGrant, ApiToken, ScheduledDeletion
from sentry.tasks.deletion import run_deletion
from sentry.testutils import TestCase
class DeleteApiApplicationTest(TestCase):
def test_simple(self):
app = ApiApplication.objects.create(
owner=self.user,
)
ApiToken.objects.create(
application=app,
user=self.user,
scopes=0,
)
ApiGrant.objects.create(
application=app,
user=self.user,
scopes=0,
redirect_uri='http://example.com',
)
deletion = ScheduledDeletion.schedule(app, days=0)
deletion.update(in_progress=True)
with self.tasks():
run_deletion(deletion.id)
assert not ApiApplication.objects.filter(id=app.id).exists()
assert not ApiGrant.objects.filter(application=app).exists()
assert not ApiToken.objects.filter(application=app).exists()
|
jtg-gg/blink
|
refs/heads/dev12-m41
|
Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
|
10
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A helper class for reading in and dealing with tests expectations
for layout tests.
"""
import logging
import re
from webkitpy.layout_tests.models.test_configuration import TestConfigurationConverter
_log = logging.getLogger(__name__)
# Test expectation and specifier constants.
#
# FIXME: range() starts with 0, which makes 'if expectation' checks harder,
# as PASS is 0.
(PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO, TIMEOUT, CRASH, LEAK, SKIP, WONTFIX,
SLOW, REBASELINE, NEEDS_REBASELINE, NEEDS_MANUAL_REBASELINE, MISSING, FLAKY, NOW, NONE) = range(19)
# FIXME: Perhaps these two routines should be part of the Port instead?
BASELINE_SUFFIX_LIST = ('png', 'wav', 'txt')
WEBKIT_BUG_PREFIX = 'webkit.org/b/'
CHROMIUM_BUG_PREFIX = 'crbug.com/'
V8_BUG_PREFIX = 'code.google.com/p/v8/issues/detail?id='
NAMED_BUG_PREFIX = 'Bug('
MISSING_KEYWORD = 'Missing'
NEEDS_REBASELINE_KEYWORD = 'NeedsRebaseline'
NEEDS_MANUAL_REBASELINE_KEYWORD = 'NeedsManualRebaseline'
class ParseError(Exception):
def __init__(self, warnings):
super(ParseError, self).__init__()
self.warnings = warnings
def __str__(self):
return '\n'.join(map(str, self.warnings))
def __repr__(self):
return 'ParseError(warnings=%s)' % self.warnings
class TestExpectationParser(object):
"""Provides parsing facilities for lines in the test_expectation.txt file."""
# FIXME: Rename these to *_KEYWORD as in MISSING_KEYWORD above, but make the case studly-caps to match the actual file contents.
REBASELINE_MODIFIER = 'rebaseline'
NEEDS_REBASELINE_MODIFIER = 'needsrebaseline'
NEEDS_MANUAL_REBASELINE_MODIFIER = 'needsmanualrebaseline'
PASS_EXPECTATION = 'pass'
SKIP_MODIFIER = 'skip'
SLOW_MODIFIER = 'slow'
WONTFIX_MODIFIER = 'wontfix'
TIMEOUT_EXPECTATION = 'timeout'
MISSING_BUG_WARNING = 'Test lacks BUG specifier.'
def __init__(self, port, full_test_list, is_lint_mode):
self._port = port
self._test_configuration_converter = TestConfigurationConverter(set(port.all_test_configurations()), port.configuration_specifier_macros())
self._full_test_list = full_test_list
self._is_lint_mode = is_lint_mode
def parse(self, filename, expectations_string):
expectation_lines = []
line_number = 0
for line in expectations_string.split("\n"):
line_number += 1
test_expectation = self._tokenize_line(filename, line, line_number)
self._parse_line(test_expectation)
expectation_lines.append(test_expectation)
return expectation_lines
def _create_expectation_line(self, test_name, expectations, file_name):
expectation_line = TestExpectationLine()
expectation_line.original_string = test_name
expectation_line.name = test_name
expectation_line.filename = file_name
expectation_line.expectations = expectations
return expectation_line
def expectation_line_for_test(self, test_name, expectations):
expectation_line = self._create_expectation_line(test_name, expectations, '<Bot TestExpectations>')
self._parse_line(expectation_line)
return expectation_line
def expectation_for_skipped_test(self, test_name):
if not self._port.test_exists(test_name):
_log.warning('The following test %s from the Skipped list doesn\'t exist' % test_name)
expectation_line = self._create_expectation_line(test_name, [TestExpectationParser.PASS_EXPECTATION], '<Skipped file>')
expectation_line.expectations = [TestExpectationParser.SKIP_MODIFIER, TestExpectationParser.WONTFIX_MODIFIER]
expectation_line.is_skipped_outside_expectations_file = True
self._parse_line(expectation_line)
return expectation_line
def _parse_line(self, expectation_line):
if not expectation_line.name:
return
if not self._check_test_exists(expectation_line):
return
expectation_line.is_file = self._port.test_isfile(expectation_line.name)
if expectation_line.is_file:
expectation_line.path = expectation_line.name
else:
expectation_line.path = self._port.normalize_test_name(expectation_line.name)
self._collect_matching_tests(expectation_line)
self._parse_specifiers(expectation_line)
self._parse_expectations(expectation_line)
def _parse_specifiers(self, expectation_line):
if self._is_lint_mode:
self._lint_line(expectation_line)
parsed_specifiers = set([specifier.lower() for specifier in expectation_line.specifiers])
expectation_line.matching_configurations = self._test_configuration_converter.to_config_set(parsed_specifiers, expectation_line.warnings)
def _lint_line(self, expectation_line):
expectations = [expectation.lower() for expectation in expectation_line.expectations]
if not expectation_line.bugs and self.WONTFIX_MODIFIER not in expectations:
expectation_line.warnings.append(self.MISSING_BUG_WARNING)
if self.REBASELINE_MODIFIER in expectations:
expectation_line.warnings.append('REBASELINE should only be used for running rebaseline.py. Cannot be checked in.')
if self.NEEDS_REBASELINE_MODIFIER in expectations or self.NEEDS_MANUAL_REBASELINE_MODIFIER in expectations:
for test in expectation_line.matching_tests:
if self._port.reference_files(test):
expectation_line.warnings.append('A reftest cannot be marked as NeedsRebaseline/NeedsManualRebaseline')
specifiers = [specifier.lower() for specifier in expectation_line.specifiers]
if (self.REBASELINE_MODIFIER in expectations or self.NEEDS_REBASELINE_MODIFIER in expectations) and ('debug' in specifiers or 'release' in specifiers):
expectation_line.warnings.append('A test cannot be rebaselined for Debug/Release.')
def _parse_expectations(self, expectation_line):
result = set()
for part in expectation_line.expectations:
expectation = TestExpectations.expectation_from_string(part)
if expectation is None: # Careful, PASS is currently 0.
expectation_line.warnings.append('Unsupported expectation: %s' % part)
continue
result.add(expectation)
expectation_line.parsed_expectations = result
def _check_test_exists(self, expectation_line):
# WebKit's way of skipping tests is to add a -disabled suffix.
# So we should consider the path existing if the path or the
# -disabled version exists.
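# e.g. a hypothetical 'fast/foo/bar.html' counts as existing when only
# 'fast/foo/bar.html-disabled' is on disk.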
if not self._port.test_exists(expectation_line.name) and not self._port.test_exists(expectation_line.name + '-disabled'):
# Log a warning here since you hit this case any
# time you update TestExpectations without syncing
# the LayoutTests directory
expectation_line.warnings.append('Path does not exist.')
return False
return True
def _collect_matching_tests(self, expectation_line):
"""Convert the test specification to an absolute, normalized
path and make sure directories end with the OS path separator."""
# FIXME: full_test_list can quickly grow to a large number of
# elements. We should consider at some point using a more
# efficient structure than a list. Maybe a dictionary of
# lists to represent the tree of tests, with leaves being test
# files and nodes being categories.
if not self._full_test_list:
expectation_line.matching_tests = [expectation_line.path]
return
if not expectation_line.is_file:
# this is a test category, return all the tests of the category.
expectation_line.matching_tests = [test for test in self._full_test_list if test.startswith(expectation_line.path)]
return
# this is a test file, do a quick check if it's in the
# full test suite.
if expectation_line.path in self._full_test_list:
expectation_line.matching_tests.append(expectation_line.path)
# FIXME: Update the original specifiers and remove this once the old syntax is gone.
_configuration_tokens_list = [
'Mac', 'SnowLeopard', 'Lion', 'Retina', 'MountainLion', 'Mavericks',
'Win', 'XP', 'Win7',
'Linux',
'Android',
'Release',
'Debug',
]
_configuration_tokens = dict((token, token.upper()) for token in _configuration_tokens_list)
_inverted_configuration_tokens = dict((value, name) for name, value in _configuration_tokens.iteritems())
# FIXME: Update the original specifiers list and remove this once the old syntax is gone.
_expectation_tokens = {
'Crash': 'CRASH',
'Leak': 'LEAK',
'Failure': 'FAIL',
'ImageOnlyFailure': 'IMAGE',
MISSING_KEYWORD: 'MISSING',
'Pass': 'PASS',
'Rebaseline': 'REBASELINE',
NEEDS_REBASELINE_KEYWORD: 'NEEDSREBASELINE',
NEEDS_MANUAL_REBASELINE_KEYWORD: 'NEEDSMANUALREBASELINE',
'Skip': 'SKIP',
'Slow': 'SLOW',
'Timeout': 'TIMEOUT',
'WontFix': 'WONTFIX',
}
_inverted_expectation_tokens = dict([(value, name) for name, value in _expectation_tokens.iteritems()] +
[('TEXT', 'Failure'), ('IMAGE+TEXT', 'Failure'), ('AUDIO', 'Failure')])
# FIXME: Seems like these should be classmethods on TestExpectationLine instead of TestExpectationParser.
@classmethod
def _tokenize_line(cls, filename, expectation_string, line_number):
"""Tokenizes a line from TestExpectations and returns an unparsed TestExpectationLine instance using the old format.
The new format for a test expectation line is:
[[bugs] [ "[" <configuration specifiers> "]" <name> [ "[" <expectations> "]" ["#" <comment>]
Any errant whitespace is not preserved.
"""
expectation_line = TestExpectationLine()
expectation_line.original_string = expectation_string
expectation_line.filename = filename
expectation_line.line_numbers = str(line_number)
comment_index = expectation_string.find("#")
if comment_index == -1:
comment_index = len(expectation_string)
else:
expectation_line.comment = expectation_string[comment_index + 1:]
remaining_string = re.sub(r"\s+", " ", expectation_string[:comment_index].strip())
if len(remaining_string) == 0:
return expectation_line
# special-case parsing this so that we fail immediately instead of treating this as a test name
if remaining_string.startswith('//'):
expectation_line.warnings = ['use "#" instead of "//" for comments']
return expectation_line
bugs = []
specifiers = []
name = None
expectations = []
warnings = []
has_unrecognized_expectation = False
tokens = remaining_string.split()
state = 'start'
for token in tokens:
if (token.startswith(WEBKIT_BUG_PREFIX) or
token.startswith(CHROMIUM_BUG_PREFIX) or
token.startswith(V8_BUG_PREFIX) or
token.startswith(NAMED_BUG_PREFIX)):
if state != 'start':
warnings.append('"%s" is not at the start of the line.' % token)
break
if token.startswith(WEBKIT_BUG_PREFIX):
bugs.append(token)
elif token.startswith(CHROMIUM_BUG_PREFIX):
bugs.append(token)
elif token.startswith(V8_BUG_PREFIX):
bugs.append(token)
else:
match = re.match('Bug\((\w+)\)$', token)
if not match:
warnings.append('unrecognized bug identifier "%s"' % token)
break
else:
bugs.append(token)
elif token == '[':
if state == 'start':
state = 'configuration'
elif state == 'name_found':
state = 'expectations'
else:
warnings.append('unexpected "["')
break
elif token == ']':
if state == 'configuration':
state = 'name'
elif state == 'expectations':
state = 'done'
else:
warnings.append('unexpected "]"')
break
elif token in ('//', ':', '='):
warnings.append('"%s" is not legal in the new TestExpectations syntax.' % token)
break
elif state == 'configuration':
specifiers.append(cls._configuration_tokens.get(token, token))
elif state == 'expectations':
if token not in cls._expectation_tokens:
has_unrecognized_expectation = True
warnings.append('Unrecognized expectation "%s"' % token)
else:
expectations.append(cls._expectation_tokens.get(token, token))
elif state == 'name_found':
warnings.append('expecting "[", "#", or end of line instead of "%s"' % token)
break
else:
name = token
state = 'name_found'
if not warnings:
if not name:
warnings.append('Did not find a test name.')
elif state not in ('name_found', 'done'):
warnings.append('Missing a "]"')
if 'WONTFIX' in expectations and 'SKIP' not in expectations:
expectations.append('SKIP')
if ('SKIP' in expectations or 'WONTFIX' in expectations) and len(set(expectations) - set(['SKIP', 'WONTFIX'])):
warnings.append('A test marked Skip or WontFix must not have other expectations.')
if not expectations and not has_unrecognized_expectation:
warnings.append('Missing expectations.')
expectation_line.bugs = bugs
expectation_line.specifiers = specifiers
expectation_line.expectations = expectations
expectation_line.name = name
expectation_line.warnings = warnings
return expectation_line
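# Illustrative sketch (not part of the original file): assuming the usual
# crbug.com/ bug prefix defined earlier in this file, a line such as
#   crbug.com/1234 [ Linux Debug ] fast/js/no-good.js [ Pass Timeout ]
# tokenizes into bugs=['crbug.com/1234'], specifiers=['LINUX', 'DEBUG'],
# name='fast/js/no-good.js' and expectations=['PASS', 'TIMEOUT'].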
@classmethod
def _split_space_separated(cls, space_separated_string):
"""Splits a space-separated string into an array."""
return [part.strip() for part in space_separated_string.strip().split(' ')]
class TestExpectationLine(object):
"""Represents a line in test expectations file."""
def __init__(self):
"""Initializes a blank-line equivalent of an expectation."""
self.original_string = None
self.filename = None # this is the path to the expectations file for this line
self.line_numbers = "0"
self.name = None # this is the path in the line itself
self.path = None # this is the normpath of self.name
self.bugs = []
self.specifiers = []
self.parsed_specifiers = []
self.matching_configurations = set()
self.expectations = []
self.parsed_expectations = set()
self.comment = None
self.matching_tests = []
self.warnings = []
self.is_skipped_outside_expectations_file = False
def __eq__(self, other):
return (self.original_string == other.original_string
and self.filename == other.filename
and self.line_numbers == other.line_numbers
and self.name == other.name
and self.path == other.path
and self.bugs == other.bugs
and self.specifiers == other.specifiers
and self.parsed_specifiers == other.parsed_specifiers
and self.matching_configurations == other.matching_configurations
and self.expectations == other.expectations
and self.parsed_expectations == other.parsed_expectations
and self.comment == other.comment
and self.matching_tests == other.matching_tests
and self.warnings == other.warnings
and self.is_skipped_outside_expectations_file == other.is_skipped_outside_expectations_file)
def is_invalid(self):
return bool(self.warnings and self.warnings != [TestExpectationParser.MISSING_BUG_WARNING])
def is_flaky(self):
return len(self.parsed_expectations) > 1
def is_whitespace_or_comment(self):
return bool(re.match("^\s*$", self.original_string.split('#')[0]))
@staticmethod
def create_passing_expectation(test):
expectation_line = TestExpectationLine()
expectation_line.name = test
expectation_line.path = test
expectation_line.parsed_expectations = set([PASS])
expectation_line.expectations = set(['PASS'])
expectation_line.matching_tests = [test]
return expectation_line
@staticmethod
def merge_expectation_lines(line1, line2, model_all_expectations):
"""Merges the expectations of line2 into line1 and returns a fresh object."""
if line1 is None:
return line2
if line2 is None:
return line1
if model_all_expectations and line1.filename != line2.filename:
return line2
# Don't merge original_string or comment.
result = TestExpectationLine()
# We only care about filenames when we're linting, in which case the filenames are the same.
# Not clear that there's anything better to do when not linting and the filenames are different.
if model_all_expectations:
result.filename = line2.filename
result.line_numbers = line1.line_numbers + "," + line2.line_numbers
result.name = line1.name
result.path = line1.path
result.parsed_expectations = set(line1.parsed_expectations) | set(line2.parsed_expectations)
result.expectations = list(set(line1.expectations) | set(line2.expectations))
result.bugs = list(set(line1.bugs) | set(line2.bugs))
result.specifiers = list(set(line1.specifiers) | set(line2.specifiers))
result.parsed_specifiers = list(set(line1.parsed_specifiers) | set(line2.parsed_specifiers))
result.matching_configurations = set(line1.matching_configurations) | set(line2.matching_configurations)
result.matching_tests = list(set(line1.matching_tests) | set(line2.matching_tests))
result.warnings = list(set(line1.warnings) | set(line2.warnings))
result.is_skipped_outside_expectations_file = line1.is_skipped_outside_expectations_file or line2.is_skipped_outside_expectations_file
return result
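# For example (illustrative), merging a line with expectations ['PASS'] into
# one with ['TIMEOUT'] yields a line whose expectations contain both, with
# line_numbers such as "12,34" so warnings can point back at both sources.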
def to_string(self, test_configuration_converter, include_specifiers=True, include_expectations=True, include_comment=True):
parsed_expectation_to_string = dict([[parsed_expectation, expectation_string] for expectation_string, parsed_expectation in TestExpectations.EXPECTATIONS.items()])
if self.is_invalid():
return self.original_string or ''
if self.name is None:
return '' if self.comment is None else "#%s" % self.comment
if test_configuration_converter and self.bugs:
specifiers_list = test_configuration_converter.to_specifiers_list(self.matching_configurations)
result = []
for specifiers in specifiers_list:
# FIXME: it's silly that we join the specifiers and then immediately split them.
specifiers = self._serialize_parsed_specifiers(test_configuration_converter, specifiers).split()
expectations = self._serialize_parsed_expectations(parsed_expectation_to_string).split()
result.append(self._format_line(self.bugs, specifiers, self.name, expectations, self.comment))
return "\n".join(result) if result else None
return self._format_line(self.bugs, self.specifiers, self.name, self.expectations, self.comment,
include_specifiers, include_expectations, include_comment)
def to_csv(self):
# Note that this doesn't include the comments.
return '%s,%s,%s,%s' % (self.name, ' '.join(self.bugs), ' '.join(self.specifiers), ' '.join(self.expectations))
def _serialize_parsed_expectations(self, parsed_expectation_to_string):
result = []
for index in TestExpectations.EXPECTATIONS.values():
if index in self.parsed_expectations:
result.append(parsed_expectation_to_string[index])
return ' '.join(result)
def _serialize_parsed_specifiers(self, test_configuration_converter, specifiers):
result = []
result.extend(sorted(self.parsed_specifiers))
result.extend(test_configuration_converter.specifier_sorter().sort_specifiers(specifiers))
return ' '.join(result)
@staticmethod
def _filter_redundant_expectations(expectations):
if set(expectations) == set(['Pass', 'Skip']):
return ['Skip']
if set(expectations) == set(['Pass', 'Slow']):
return ['Slow']
return expectations
@staticmethod
def _format_line(bugs, specifiers, name, expectations, comment, include_specifiers=True, include_expectations=True, include_comment=True):
new_specifiers = []
new_expectations = []
for specifier in specifiers:
# FIXME: Make this all work with the mixed-case specifiers (e.g. WontFix, Slow, etc).
specifier = specifier.upper()
new_specifiers.append(TestExpectationParser._inverted_configuration_tokens.get(specifier, specifier))
for expectation in expectations:
expectation = expectation.upper()
new_expectations.append(TestExpectationParser._inverted_expectation_tokens.get(expectation, expectation))
result = ''
if include_specifiers and (bugs or new_specifiers):
if bugs:
result += ' '.join(bugs) + ' '
if new_specifiers:
result += '[ %s ] ' % ' '.join(new_specifiers)
result += name
if include_expectations and new_expectations:
new_expectations = TestExpectationLine._filter_redundant_expectations(new_expectations)
result += ' [ %s ]' % ' '.join(sorted(set(new_expectations)))
if include_comment and comment is not None:
result += " #%s" % comment
return result
# FIXME: Refactor API to be a proper CRUD.
class TestExpectationsModel(object):
"""Represents relational store of all expectations and provides CRUD semantics to manage it."""
def __init__(self, shorten_filename=None):
# Maps a test to its list of expectations.
self._test_to_expectations = {}
# Maps a test to list of its specifiers (string values)
self._test_to_specifiers = {}
# Maps a test to a TestExpectationLine instance.
self._test_to_expectation_line = {}
self._expectation_to_tests = self._dict_of_sets(TestExpectations.EXPECTATIONS)
self._timeline_to_tests = self._dict_of_sets(TestExpectations.TIMELINES)
self._result_type_to_tests = self._dict_of_sets(TestExpectations.RESULT_TYPES)
self._shorten_filename = shorten_filename or (lambda x: x)
def _merge_test_map(self, self_map, other_map):
for test in other_map:
new_expectations = set(other_map[test])
if test in self_map:
new_expectations |= set(self_map[test])
self_map[test] = list(new_expectations) if isinstance(other_map[test], list) else new_expectations
def _merge_dict_of_sets(self, self_dict, other_dict):
for key in other_dict:
self_dict[key] |= other_dict[key]
def merge_model(self, other):
self._merge_test_map(self._test_to_expectations, other._test_to_expectations)
for test, line in other._test_to_expectation_line.items():
if test in self._test_to_expectation_line:
line = TestExpectationLine.merge_expectation_lines(self._test_to_expectation_line[test], line, model_all_expectations=False)
self._test_to_expectation_line[test] = line
self._merge_dict_of_sets(self._expectation_to_tests, other._expectation_to_tests)
self._merge_dict_of_sets(self._timeline_to_tests, other._timeline_to_tests)
self._merge_dict_of_sets(self._result_type_to_tests, other._result_type_to_tests)
def _dict_of_sets(self, strings_to_constants):
"""Takes a dict of strings->constants and returns a dict mapping
each constant to an empty set."""
d = {}
for c in strings_to_constants.values():
d[c] = set()
return d
def get_test_set(self, expectation, include_skips=True):
tests = self._expectation_to_tests[expectation]
if not include_skips:
tests = tests - self.get_test_set(SKIP)
return tests
def get_test_set_for_keyword(self, keyword):
expectation_enum = TestExpectations.EXPECTATIONS.get(keyword.lower(), None)
if expectation_enum is not None:
return self._expectation_to_tests[expectation_enum]
matching_tests = set()
for test, specifiers in self._test_to_specifiers.iteritems():
if keyword.lower() in specifiers:
matching_tests.add(test)
return matching_tests
def get_tests_with_result_type(self, result_type):
return self._result_type_to_tests[result_type]
def get_tests_with_timeline(self, timeline):
return self._timeline_to_tests[timeline]
def has_test(self, test):
return test in self._test_to_expectation_line
def get_expectation_line(self, test):
return self._test_to_expectation_line.get(test)
def get_expectations(self, test):
return self._test_to_expectations[test]
def get_expectations_string(self, test):
"""Returns the expectatons for the given test as an uppercase string.
If there are no expectations for the test, then "PASS" is returned."""
if self.get_expectation_line(test).is_skipped_outside_expectations_file:
return 'NOTRUN'
expectations = self.get_expectations(test)
retval = []
# FIXME: WontFix should cause the test to get skipped without artificially adding SKIP to the expectations list.
if WONTFIX in expectations and SKIP in expectations:
expectations.remove(SKIP)
for expectation in expectations:
retval.append(self.expectation_to_string(expectation))
return " ".join(retval)
def expectation_to_string(self, expectation):
"""Return the uppercased string equivalent of a given expectation."""
for item in TestExpectations.EXPECTATIONS.items():
if item[1] == expectation:
return item[0].upper()
raise ValueError(expectation)
def remove_expectation_line(self, test):
if not self.has_test(test):
return
self._clear_expectations_for_test(test)
del self._test_to_expectation_line[test]
def add_expectation_line(self, expectation_line,
model_all_expectations=False):
"""Returns a list of warnings encountered while matching specifiers."""
if expectation_line.is_invalid():
return
for test in expectation_line.matching_tests:
if self._already_seen_better_match(test, expectation_line):
continue
if model_all_expectations:
expectation_line = TestExpectationLine.merge_expectation_lines(self.get_expectation_line(test), expectation_line, model_all_expectations)
self._clear_expectations_for_test(test)
self._test_to_expectation_line[test] = expectation_line
self._add_test(test, expectation_line)
def _add_test(self, test, expectation_line):
"""Sets the expected state for a given test.
This routine assumes the test has not been added before. If it has,
use _clear_expectations_for_test() to reset the state prior to
calling this."""
self._test_to_expectations[test] = expectation_line.parsed_expectations
for expectation in expectation_line.parsed_expectations:
self._expectation_to_tests[expectation].add(test)
self._test_to_specifiers[test] = expectation_line.specifiers
if WONTFIX in expectation_line.parsed_expectations:
self._timeline_to_tests[WONTFIX].add(test)
else:
self._timeline_to_tests[NOW].add(test)
if SKIP in expectation_line.parsed_expectations:
self._result_type_to_tests[SKIP].add(test)
elif expectation_line.parsed_expectations == set([PASS]):
self._result_type_to_tests[PASS].add(test)
elif expectation_line.is_flaky():
self._result_type_to_tests[FLAKY].add(test)
else:
# FIXME: What is this?
self._result_type_to_tests[FAIL].add(test)
def _clear_expectations_for_test(self, test):
"""Remove prexisting expectations for this test.
This happens if we are seeing a more precise path
than a previous listing.
"""
if self.has_test(test):
self._test_to_expectations.pop(test, '')
self._remove_from_sets(test, self._expectation_to_tests)
self._remove_from_sets(test, self._timeline_to_tests)
self._remove_from_sets(test, self._result_type_to_tests)
def _remove_from_sets(self, test, dict_of_sets_of_tests):
"""Removes the given test from the sets in the dictionary.
Args:
test: test to look for
dict: dict of sets of files"""
for set_of_tests in dict_of_sets_of_tests.itervalues():
if test in set_of_tests:
set_of_tests.remove(test)
def _already_seen_better_match(self, test, expectation_line):
"""Returns whether we've seen a better match already in the file.
Returns True if we've already seen an expectation_line.name that matches more of the test
than this path does.
"""
# FIXME: See comment below about matching test configs and specificity.
if not self.has_test(test):
# We've never seen this test before.
return False
prev_expectation_line = self._test_to_expectation_line[test]
if prev_expectation_line.filename != expectation_line.filename:
# We've moved on to a new expectation file, which overrides older ones.
return False
if len(prev_expectation_line.path) > len(expectation_line.path):
# The previous path matched more of the test.
return True
if len(prev_expectation_line.path) < len(expectation_line.path):
# This path matches more of the test.
return False
# At this point we know we have seen a previous exact match on this
# base path, so we need to check the two sets of specifiers.
# FIXME: This code was originally designed to allow lines that matched
# more specifiers to override lines that matched fewer specifiers.
# However, we currently view these as errors.
#
# To use the "more specifiers wins" policy, change the errors for overrides
# to be warnings and return False".
if prev_expectation_line.matching_configurations == expectation_line.matching_configurations:
expectation_line.warnings.append('Duplicate or ambiguous entry lines %s:%s and %s:%s.' % (
self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_numbers,
self._shorten_filename(expectation_line.filename), expectation_line.line_numbers))
return True
if prev_expectation_line.matching_configurations >= expectation_line.matching_configurations:
expectation_line.warnings.append('More specific entry for %s on line %s:%s overrides line %s:%s.' % (expectation_line.name,
self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_numbers,
self._shorten_filename(expectation_line.filename), expectation_line.line_numbers))
# FIXME: return False if we want more specific to win.
return True
if prev_expectation_line.matching_configurations <= expectation_line.matching_configurations:
expectation_line.warnings.append('More specific entry for %s on line %s:%s overrides line %s:%s.' % (expectation_line.name,
self._shorten_filename(expectation_line.filename), expectation_line.line_numbers,
self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_numbers))
return True
if prev_expectation_line.matching_configurations & expectation_line.matching_configurations:
expectation_line.warnings.append('Entries for %s on lines %s:%s and %s:%s match overlapping sets of configurations.' % (expectation_line.name,
self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_numbers,
self._shorten_filename(expectation_line.filename), expectation_line.line_numbers))
return True
# Configuration sets are disjoint, then.
return False
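# Illustrative example of the precedence rules above: if a directory entry
# 'fast/js' was seen first and the exact file 'fast/js/a.html' is parsed
# later, the longer path wins for that test; two entries with equal paths and
# identical matching configurations trigger the duplicate-entry warning.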
class TestExpectations(object):
"""Test expectations consist of lines with specifications of what
to expect from layout test cases. The test cases can be directories
in which case the expectations apply to all test cases in that
directory and any subdirectory. The format is along the lines of:
LayoutTests/fast/js/fixme.js [ Failure ]
LayoutTests/fast/js/flaky.js [ Failure Pass ]
LayoutTests/fast/js/crash.js [ Crash Failure Pass Timeout ]
...
To add specifiers:
LayoutTests/fast/js/no-good.js
[ Debug ] LayoutTests/fast/js/no-good.js [ Pass Timeout ]
[ Debug ] LayoutTests/fast/js/no-good.js [ Pass Skip Timeout ]
[ Linux Debug ] LayoutTests/fast/js/no-good.js [ Pass Skip Timeout ]
[ Linux Win ] LayoutTests/fast/js/no-good.js [ Pass Skip Timeout ]
Skip: Doesn't run the test.
Slow: The test takes a long time to run, but does not timeout indefinitely.
WontFix: For tests that we never intend to pass on a given platform (treated like Skip).
Notes:
- A test cannot be both SLOW and TIMEOUT.
- A test can be included twice, but not via the same path.
- If a test is included twice, then the more precise path wins.
- CRASH tests cannot be WONTFIX.
"""
# FIXME: Update to new syntax once the old format is no longer supported.
EXPECTATIONS = {'pass': PASS,
'audio': AUDIO,
'fail': FAIL,
'image': IMAGE,
'image+text': IMAGE_PLUS_TEXT,
'text': TEXT,
'timeout': TIMEOUT,
'crash': CRASH,
'leak': LEAK,
'missing': MISSING,
TestExpectationParser.SKIP_MODIFIER: SKIP,
TestExpectationParser.NEEDS_REBASELINE_MODIFIER: NEEDS_REBASELINE,
TestExpectationParser.NEEDS_MANUAL_REBASELINE_MODIFIER: NEEDS_MANUAL_REBASELINE,
TestExpectationParser.WONTFIX_MODIFIER: WONTFIX,
TestExpectationParser.SLOW_MODIFIER: SLOW,
TestExpectationParser.REBASELINE_MODIFIER: REBASELINE,
}
EXPECTATIONS_TO_STRING = dict((k, v) for (v, k) in EXPECTATIONS.iteritems())
# (aggregated by category, pass/fail/skip, type)
EXPECTATION_DESCRIPTIONS = {SKIP: 'skipped',
PASS: 'passes',
FAIL: 'failures',
IMAGE: 'image-only failures',
TEXT: 'text-only failures',
IMAGE_PLUS_TEXT: 'image and text failures',
AUDIO: 'audio failures',
CRASH: 'crashes',
LEAK: 'leaks',
TIMEOUT: 'timeouts',
MISSING: 'missing results'}
NON_TEST_OUTCOME_EXPECTATIONS = (REBASELINE, SKIP, SLOW, WONTFIX)
BUILD_TYPES = ('debug', 'release')
TIMELINES = {TestExpectationParser.WONTFIX_MODIFIER: WONTFIX,
'now': NOW}
RESULT_TYPES = {'skip': SKIP,
'pass': PASS,
'fail': FAIL,
'flaky': FLAKY}
@classmethod
def expectation_from_string(cls, string):
assert(' ' not in string) # This only handles one expectation at a time.
return cls.EXPECTATIONS.get(string.lower())
@staticmethod
def result_was_expected(result, expected_results, test_needs_rebaselining):
"""Returns whether we got a result we were expecting.
Args:
result: actual result of a test execution
expected_results: set of results listed in test_expectations
test_needs_rebaselining: whether test was marked as REBASELINE"""
if not (set(expected_results) - (set(TestExpectations.NON_TEST_OUTCOME_EXPECTATIONS))):
expected_results = set([PASS])
if result in expected_results:
return True
if result in (PASS, TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO, MISSING) and (NEEDS_REBASELINE in expected_results or NEEDS_MANUAL_REBASELINE in expected_results):
return True
if result in (TEXT, IMAGE_PLUS_TEXT, AUDIO) and (FAIL in expected_results):
return True
if result == MISSING and test_needs_rebaselining:
return True
if result == SKIP:
return True
return False
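# Worked example (illustrative): result_was_expected(TEXT, set([FAIL]), False)
# returns True because TEXT, IMAGE+TEXT and AUDIO failures are folded into
# FAIL above, while result_was_expected(CRASH, set([PASS]), False) is False.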
@staticmethod
def remove_pixel_failures(expected_results):
"""Returns a copy of the expected results for a test, except that we
drop any pixel failures and return the remaining expectations. For example,
if we're not running pixel tests, then tests expected to fail as IMAGE
will PASS."""
expected_results = expected_results.copy()
if IMAGE in expected_results:
expected_results.remove(IMAGE)
expected_results.add(PASS)
return expected_results
@staticmethod
def remove_non_sanitizer_failures(expected_results):
"""Returns a copy of the expected results for a test, except that we
drop any failures that the sanitizers don't care about."""
expected_results = expected_results.copy()
for result in (IMAGE, FAIL, IMAGE_PLUS_TEXT):
if result in expected_results:
expected_results.remove(result)
expected_results.add(PASS)
return expected_results
@staticmethod
def has_pixel_failures(actual_results):
return IMAGE in actual_results or FAIL in actual_results
@staticmethod
def suffixes_for_expectations(expectations):
suffixes = set()
if IMAGE in expectations:
suffixes.add('png')
if FAIL in expectations:
suffixes.add('txt')
suffixes.add('png')
suffixes.add('wav')
return set(suffixes)
@staticmethod
def suffixes_for_actual_expectations_string(expectations):
suffixes = set()
if 'TEXT' in expectations:
suffixes.add('txt')
if 'IMAGE' in expectations:
suffixes.add('png')
if 'AUDIO' in expectations:
suffixes.add('wav')
if 'MISSING' in expectations:
suffixes.add('txt')
suffixes.add('png')
suffixes.add('wav')
return suffixes
# FIXME: This constructor does too much work. We should move the actual parsing of
# the expectations into separate routines so that linting and handling overrides
# can be controlled separately, and the constructor can be more of a no-op.
def __init__(self, port, tests=None, include_overrides=True, expectations_dict=None, model_all_expectations=False, is_lint_mode=False):
self._full_test_list = tests
self._test_config = port.test_configuration()
self._is_lint_mode = is_lint_mode
self._model_all_expectations = self._is_lint_mode or model_all_expectations
self._model = TestExpectationsModel(self._shorten_filename)
self._parser = TestExpectationParser(port, tests, self._is_lint_mode)
self._port = port
self._skipped_tests_warnings = []
self._expectations = []
if not expectations_dict:
expectations_dict = port.expectations_dict()
# Always parse the generic expectations (the generic file is required
# to be the first one in the expectations_dict, which must be an OrderedDict).
generic_path, generic_exps = expectations_dict.items()[0]
expectations = self._parser.parse(generic_path, generic_exps)
self._add_expectations(expectations, self._model)
self._expectations += expectations
# Now add the overrides if so requested.
if include_overrides:
for path, contents in expectations_dict.items()[1:]:
expectations = self._parser.parse(path, contents)
model = TestExpectationsModel(self._shorten_filename)
self._add_expectations(expectations, model)
self._expectations += expectations
self._model.merge_model(model)
# FIXME: move ignore_tests into port.skipped_layout_tests()
self.add_extra_skipped_tests(port.skipped_layout_tests(tests).union(set(port.get_option('ignore_tests', []))))
self.add_expectations_from_bot()
self._has_warnings = False
self._report_warnings()
self._process_tests_without_expectations()
# TODO(ojan): Allow for removing skipped tests when getting the list of
# tests to run, but not when getting metrics.
def model(self):
return self._model
def get_needs_rebaseline_failures(self):
return self._model.get_test_set(NEEDS_REBASELINE)
def get_rebaselining_failures(self):
return self._model.get_test_set(REBASELINE)
# FIXME: Change the callsites to use TestExpectationsModel and remove.
def get_expectations(self, test):
return self._model.get_expectations(test)
# FIXME: Change the callsites to use TestExpectationsModel and remove.
def get_tests_with_result_type(self, result_type):
return self._model.get_tests_with_result_type(result_type)
# FIXME: Change the callsites to use TestExpectationsModel and remove.
def get_test_set(self, expectation, include_skips=True):
return self._model.get_test_set(expectation, include_skips)
# FIXME: Change the callsites to use TestExpectationsModel and remove.
def get_tests_with_timeline(self, timeline):
return self._model.get_tests_with_timeline(timeline)
def get_expectations_string(self, test):
return self._model.get_expectations_string(test)
def expectation_to_string(self, expectation):
return self._model.expectation_to_string(expectation)
def matches_an_expected_result(self, test, result, pixel_tests_are_enabled, sanitizer_is_enabled):
expected_results = self._model.get_expectations(test)
if sanitizer_is_enabled:
expected_results = self.remove_non_sanitizer_failures(expected_results)
elif not pixel_tests_are_enabled:
expected_results = self.remove_pixel_failures(expected_results)
return self.result_was_expected(result, expected_results, self.is_rebaselining(test))
def is_rebaselining(self, test):
return REBASELINE in self._model.get_expectations(test)
def _shorten_filename(self, filename):
if filename.startswith(self._port.path_from_webkit_base()):
return self._port.host.filesystem.relpath(filename, self._port.path_from_webkit_base())
return filename
def _report_warnings(self):
warnings = []
for expectation in self._expectations:
for warning in expectation.warnings:
warnings.append('%s:%s %s %s' % (self._shorten_filename(expectation.filename), expectation.line_numbers,
warning, expectation.name if expectation.expectations else expectation.original_string))
if warnings:
self._has_warnings = True
if self._is_lint_mode:
raise ParseError(warnings)
_log.warning('--lint-test-files warnings:')
for warning in warnings:
_log.warning(warning)
_log.warning('')
def _process_tests_without_expectations(self):
if self._full_test_list:
for test in self._full_test_list:
if not self._model.has_test(test):
self._model.add_expectation_line(TestExpectationLine.create_passing_expectation(test))
def has_warnings(self):
return self._has_warnings
def remove_configurations(self, removals):
expectations_to_remove = []
modified_expectations = []
for test, test_configuration in removals:
for expectation in self._expectations:
if expectation.name != test or not expectation.parsed_expectations:
continue
if test_configuration not in expectation.matching_configurations:
continue
expectation.matching_configurations.remove(test_configuration)
if expectation.matching_configurations:
modified_expectations.append(expectation)
else:
expectations_to_remove.append(expectation)
for expectation in expectations_to_remove:
index = self._expectations.index(expectation)
self._expectations.remove(expectation)
if index == len(self._expectations) or self._expectations[index].is_whitespace_or_comment():
while index and self._expectations[index - 1].is_whitespace_or_comment():
index = index - 1
self._expectations.pop(index)
return self.list_to_string(self._expectations, self._parser._test_configuration_converter, modified_expectations)
def _add_expectations(self, expectation_list, model):
for expectation_line in expectation_list:
if not expectation_line.expectations:
continue
if self._model_all_expectations or self._test_config in expectation_line.matching_configurations:
model.add_expectation_line(expectation_line, model_all_expectations=self._model_all_expectations)
def add_extra_skipped_tests(self, tests_to_skip):
if not tests_to_skip:
return
for test in self._expectations:
if test.name and test.name in tests_to_skip:
test.warnings.append('%s:%s %s is also in a Skipped file.' % (test.filename, test.line_numbers, test.name))
model = TestExpectationsModel(self._shorten_filename)
for test_name in tests_to_skip:
expectation_line = self._parser.expectation_for_skipped_test(test_name)
model.add_expectation_line(expectation_line)
self._model.merge_model(model)
def add_expectations_from_bot(self):
# FIXME: With mode 'very-flaky' and 'maybe-flaky', this will show the expectations entry in the flakiness
# dashboard rows for each test to be whatever the bot thinks they should be. Is this a good thing?
bot_expectations = self._port.bot_expectations()
model = TestExpectationsModel(self._shorten_filename)
for test_name in bot_expectations:
expectation_line = self._parser.expectation_line_for_test(test_name, bot_expectations[test_name])
# Unexpected results are merged into existing expectations.
merge = self._port.get_option('ignore_flaky_tests') == 'unexpected'
model.add_expectation_line(expectation_line)
self._model.merge_model(model)
def add_expectation_line(self, expectation_line):
self._model.add_expectation_line(expectation_line)
self._expectations += [expectation_line]
def remove_expectation_line(self, test):
if not self._model.has_test(test):
return
self._expectations.remove(self._model.get_expectation_line(test))
self._model.remove_expectation_line(test)
@staticmethod
def list_to_string(expectation_lines, test_configuration_converter=None, reconstitute_only_these=None):
def serialize(expectation_line):
# If reconstitute_only_these is an empty list, we want to return original_string.
# So we need to compare reconstitute_only_these to None, not just check if it's falsey.
if reconstitute_only_these is None or expectation_line in reconstitute_only_these:
return expectation_line.to_string(test_configuration_converter)
return expectation_line.original_string
def nones_out(expectation_line):
return expectation_line is not None
return "\n".join(filter(nones_out, map(serialize, expectation_lines)))
|
xxshutong/openerp-7.0
|
refs/heads/master
|
openerp/addons/account/report/account_print_invoice.py
|
61
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
class account_invoice(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(account_invoice, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
})
report_sxw.report_sxw(
'report.account.invoice',
'account.invoice',
'addons/account/report/account_print_invoice.rml',
parser=account_invoice
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
amit0701/rally
|
refs/heads/master
|
rally/task/functional.py
|
6
|
# Copyright 2015: Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally import exceptions
class FunctionalMixin(object):
"""Functional assertions.
The Rally core team deliberately decided not to use an existing framework
for this, such as `testtools`.
Using 'testtools' would introduce the following problems:
- Rally production code works with testing tools code that is not designed
to be used in production.
- Rally code depends on a bunch of new libs introduced by testtools and
testtools itself, which means: more code on which Rally is dependent,
more time required to install Rally, more disk space required by Rally.
- Classes like Scenario & Context are inherited from testtools.TestCase
that makes these classes really hard to learn (for instance:
running dir(base.Scenario) you cannot see a ton of methods inside it)
- It won't be clear for end users what exceptions are raised: unittest
exceptions are going to be raised during production runs instead of
Rally assertion exceptions.
"""
def _concatenate_message(self, default, extended):
if not extended:
return default
if default[-1] != ".":
default += "."
return default + " " + extended.capitalize()
def assertEqual(self, first, second, err_msg=None):
if first != second:
msg = "%s != %s" % (repr(first),
repr(second))
raise exceptions.RallyAssertionError(
self._concatenate_message(msg, err_msg))
def assertNotEqual(self, first, second, err_msg=None):
if first == second:
msg = "%s == %s" % (repr(first),
repr(second))
raise exceptions.RallyAssertionError(
self._concatenate_message(msg, err_msg))
def assertTrue(self, value, err_msg=None):
if not value:
msg = "%s is not True" % repr(value)
raise exceptions.RallyAssertionError(
self._concatenate_message(msg, err_msg))
def assertFalse(self, value, err_msg=None):
if value:
msg = "%s is not False" % repr(value)
raise exceptions.RallyAssertionError(
self._concatenate_message(msg, err_msg))
def assertIs(self, first, second, err_msg=None):
if first is not second:
msg = "%s is not %s" % (repr(first),
repr(second))
raise exceptions.RallyAssertionError(
self._concatenate_message(msg, err_msg))
def assertIsNot(self, first, second, err_msg=None):
if first is second:
msg = "%s is %s" % (repr(first),
repr(second))
raise exceptions.RallyAssertionError(
self._concatenate_message(msg, err_msg))
def assertIsNone(self, value, err_msg=None):
if value is not None:
msg = "%s is not None" % repr(value)
raise exceptions.RallyAssertionError(
self._concatenate_message(msg, err_msg))
def assertIsNotNone(self, value, err_msg=None):
if value is None:
msg = "%s is None" % repr(value)
raise exceptions.RallyAssertionError(
self._concatenate_message(msg, err_msg))
def assertIn(self, member, container, err_msg=None):
if member not in container:
msg = "%s not found in %s" % (repr(member),
repr(container))
raise exceptions.RallyAssertionError(
self._concatenate_message(msg, err_msg))
def assertNotIn(self, member, container, err_msg=None):
if member in container:
msg = "%s found in %s" % (repr(member),
repr(container))
raise exceptions.RallyAssertionError(
self._concatenate_message(msg, err_msg))
def assertIsInstance(self, first, second, err_msg=None):
if not isinstance(first, second):
msg = "%s is not instance of %s" % (repr(first),
repr(second))
raise exceptions.RallyAssertionError(
self._concatenate_message(msg, err_msg))
def assertIsNotInstance(self, first, second, err_msg=None):
if isinstance(first, second):
msg = "%s is instance of %s" % (repr(first),
repr(second))
raise exceptions.RallyAssertionError(
self._concatenate_message(msg, err_msg))
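# Usage sketch (illustrative, not part of the original module):
#
#   class MyScenario(FunctionalMixin):
#       def run(self, computed, expected):
#           self.assertEqual(computed, expected, err_msg="values diverged")
#
# On failure this raises exceptions.RallyAssertionError with a message such as
# "1 != 2. Values diverged." rather than a unittest assertion error.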
|
gitaarik/django
|
refs/heads/master
|
tests/admin_registration/tests.py
|
9
|
from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.admin.decorators import register
from django.contrib.admin.sites import site
from django.core.exceptions import ImproperlyConfigured
from django.test import SimpleTestCase
from .models import Location, Person, Place, Traveler
class NameAdmin(admin.ModelAdmin):
list_display = ['name']
save_on_top = True
class CustomSite(admin.AdminSite):
pass
class TestRegistration(SimpleTestCase):
def setUp(self):
self.site = admin.AdminSite()
def test_bare_registration(self):
self.site.register(Person)
self.assertTrue(
isinstance(self.site._registry[Person], admin.options.ModelAdmin)
)
def test_registration_with_model_admin(self):
self.site.register(Person, NameAdmin)
self.assertTrue(
isinstance(self.site._registry[Person], NameAdmin)
)
def test_prevent_double_registration(self):
self.site.register(Person)
with self.assertRaises(admin.sites.AlreadyRegistered):
self.site.register(Person)
def test_registration_with_star_star_options(self):
self.site.register(Person, search_fields=['name'])
self.assertEqual(self.site._registry[Person].search_fields, ['name'])
def test_star_star_overrides(self):
self.site.register(Person, NameAdmin,
search_fields=["name"], list_display=['__str__'])
self.assertEqual(self.site._registry[Person].search_fields, ['name'])
self.assertEqual(self.site._registry[Person].list_display,
['__str__'])
self.assertTrue(self.site._registry[Person].save_on_top)
def test_iterable_registration(self):
self.site.register([Person, Place], search_fields=['name'])
self.assertTrue(
isinstance(self.site._registry[Person], admin.options.ModelAdmin)
)
self.assertEqual(self.site._registry[Person].search_fields, ['name'])
self.assertTrue(
isinstance(self.site._registry[Place], admin.options.ModelAdmin)
)
self.assertEqual(self.site._registry[Place].search_fields, ['name'])
def test_abstract_model(self):
"""
Exception is raised when trying to register an abstract model.
Refs #12004.
"""
with self.assertRaises(ImproperlyConfigured):
self.site.register(Location)
def test_is_registered_model(self):
"Checks for registered models should return true."
self.site.register(Person)
self.assertTrue(self.site.is_registered(Person))
def test_is_registered_not_registered_model(self):
"Checks for unregistered models should return false."
self.assertFalse(self.site.is_registered(Person))
class TestRegistrationDecorator(SimpleTestCase):
"""
Tests the register decorator in admin.decorators
For clarity:
@register(Person)
class AuthorAdmin(ModelAdmin):
pass
is functionally equal to (the way it is written in these tests):
AuthorAdmin = register(Person)(AuthorAdmin)
"""
def setUp(self):
self.default_site = site
self.custom_site = CustomSite()
def test_basic_registration(self):
register(Person)(NameAdmin)
self.assertTrue(
isinstance(self.default_site._registry[Person],
admin.options.ModelAdmin)
)
self.default_site.unregister(Person)
def test_custom_site_registration(self):
register(Person, site=self.custom_site)(NameAdmin)
self.assertTrue(
isinstance(self.custom_site._registry[Person],
admin.options.ModelAdmin)
)
def test_multiple_registration(self):
register(Traveler, Place)(NameAdmin)
self.assertTrue(
isinstance(self.default_site._registry[Traveler],
admin.options.ModelAdmin)
)
self.default_site.unregister(Traveler)
self.assertTrue(
isinstance(self.default_site._registry[Place],
admin.options.ModelAdmin)
)
self.default_site.unregister(Place)
def test_wrapped_class_not_a_model_admin(self):
with self.assertRaisesMessage(ValueError, 'Wrapped class must subclass ModelAdmin.'):
register(Person)(CustomSite)
def test_custom_site_not_an_admin_site(self):
with self.assertRaisesMessage(ValueError, 'site must subclass AdminSite'):
register(Person, site=Traveler)(NameAdmin)
def test_empty_models_list_registration_fails(self):
with self.assertRaisesMessage(ValueError, 'At least one model must be passed to register.'):
register()(NameAdmin)
|
grilo/ansible-1
|
refs/heads/devel
|
lib/ansible/modules/storage/netapp/na_cdot_volume.py
|
28
|
#!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_cdot_volume
short_description: Manage NetApp cDOT volumes
extends_documentation_fragment:
- netapp.ontap
version_added: '2.3'
author: Sumit Kumar (sumit4@netapp.com)
description:
- Create or destroy volumes on NetApp cDOT
options:
state:
description:
- Whether the specified volume should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- The name of the volume to manage.
required: true
infinite:
description:
- Set True if the volume is an Infinite Volume.
choices: ['True', 'False']
default: 'False'
online:
description:
- Whether the specified volume is online, or not.
choices: ['True', 'False']
default: 'True'
aggregate_name:
description:
- The name of the aggregate the flexvol should exist on. Required when C(state=present).
size:
description:
- The size of the volume in (size_unit). Required when C(state=present).
size_unit:
description:
- The unit used to interpret the size parameter.
choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
default: 'gb'
vserver:
description:
- Name of the vserver to use.
required: true
'''
EXAMPLES = """
- name: Create FlexVol
na_cdot_volume:
state: present
name: ansibleVolume
infinite: False
aggregate_name: aggr1
size: 20
size_unit: mb
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Make FlexVol offline
na_cdot_volume:
state: present
name: ansibleVolume
infinite: False
online: False
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppCDOTVolume(object):
def __init__(self):
self._size_unit_map = dict(
bytes=1,
b=1,
kb=1024,
mb=1024 ** 2,
gb=1024 ** 3,
tb=1024 ** 4,
pb=1024 ** 5,
eb=1024 ** 6,
zb=1024 ** 7,
yb=1024 ** 8
)
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
is_infinite=dict(required=False, type='bool', default=False, aliases=['infinite']),
is_online=dict(required=False, type='bool', default=True, aliases=['online']),
size=dict(type='int'),
size_unit=dict(default='gb',
choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
'pb', 'eb', 'zb', 'yb'], type='str'),
aggregate_name=dict(type='str'),
vserver=dict(required=True, type='str'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_if=[
('state', 'present', ['aggregate_name', 'size'])
],
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.is_infinite = p['is_infinite']
self.is_online = p['is_online']
self.size_unit = p['size_unit']
self.vserver = p['vserver']
if p['size'] is not None:
self.size = p['size'] * self._size_unit_map[self.size_unit]
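# e.g. size=20 with size_unit='mb' yields 20 * 1024 ** 2 = 20971520 bytes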
else:
self.size = None
self.aggregate_name = p['aggregate_name']
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_ontap_zapi(module=self.module, vserver=self.vserver)
def get_volume(self):
"""
Return details about the volume
:param name: Name of the volume
:return: Details about the volume. None if not found.
:rtype: dict
"""
volume_info = netapp_utils.zapi.NaElement('volume-get-iter')
volume_attributes = netapp_utils.zapi.NaElement('volume-attributes')
volume_id_attributes = netapp_utils.zapi.NaElement('volume-id-attributes')
volume_id_attributes.add_new_child('name', self.name)
volume_attributes.add_child_elem(volume_id_attributes)
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(volume_attributes)
volume_info.add_child_elem(query)
result = self.server.invoke_successfully(volume_info, True)
return_value = None
if result.get_child_by_name('num-records') and \
int(result.get_child_content('num-records')) >= 1:
volume_attributes = result.get_child_by_name(
'attributes-list').get_child_by_name(
'volume-attributes')
# Get volume's current size
volume_space_attributes = volume_attributes.get_child_by_name(
'volume-space-attributes')
current_size = volume_space_attributes.get_child_content('size')
# Get volume's state (online/offline)
volume_state_attributes = volume_attributes.get_child_by_name(
'volume-state-attributes')
current_state = volume_state_attributes.get_child_content('state')
is_online = None
if current_state == "online":
is_online = True
elif current_state == "offline":
is_online = False
return_value = {
'name': self.name,
'size': current_size,
'is_online': is_online,
}
return return_value
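# Illustrative return value for an existing 20 MB online volume (note that
# ZAPI reports the size as a string):
#   {'name': 'ansibleVolume', 'size': '20971520', 'is_online': True}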
def create_volume(self):
volume_create = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-create', **{'volume': self.name,
'containing-aggr-name': self.aggregate_name,
'size': str(self.size)})
try:
self.server.invoke_successfully(volume_create,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error provisioning volume %s of size %s: %s' % (self.name, self.size, to_native(e)),
exception=traceback.format_exc())
def delete_volume(self):
if self.is_infinite:
volume_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-destroy-async', **{'volume-name': self.name})
else:
volume_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-destroy', **{'name': self.name, 'unmount-and-offline':
'true'})
try:
self.server.invoke_successfully(volume_delete,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error deleting volume %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def rename_volume(self):
"""
Rename the volume.
Note: 'is_infinite' needs to be set to True in order to rename an
Infinite Volume.
"""
if self.is_infinite:
volume_rename = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-rename-async',
**{'volume-name': self.name, 'new-volume-name': str(
self.name)})
else:
volume_rename = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-rename', **{'volume': self.name, 'new-volume-name': str(
self.name)})
try:
self.server.invoke_successfully(volume_rename,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error renaming volume %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def resize_volume(self):
"""
Re-size the volume.
Note: 'is_infinite' needs to be set to True in order to resize an
Infinite Volume.
"""
if self.is_infinite:
volume_resize = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-size-async',
**{'volume-name': self.name, 'new-size': str(
self.size)})
else:
volume_resize = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-size', **{'volume': self.name, 'new-size': str(
self.size)})
try:
self.server.invoke_successfully(volume_resize,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error re-sizing volume %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def change_volume_state(self):
"""
Change volume's state (offline/online).
Note: 'is_infinite' needs to be set to True in order to change the
state of an Infinite Volume.
"""
state_requested = None
if self.is_online:
# Requested state is 'online'.
state_requested = "online"
if self.is_infinite:
volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-online-async',
**{'volume-name': self.name})
else:
volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-online',
**{'name': self.name})
else:
# Requested state is 'offline'.
state_requested = "offline"
if self.is_infinite:
volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-offline-async',
**{'volume-name': self.name})
else:
volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-offline',
**{'name': self.name})
try:
self.server.invoke_successfully(volume_change_state,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error changing the state of volume %s to %s: %s' %
(self.name, state_requested, to_native(e)),
exception=traceback.format_exc())
def apply(self):
changed = False
volume_exists = False
rename_volume = False
resize_volume = False
volume_detail = self.get_volume()
if volume_detail:
volume_exists = True
if self.state == 'absent':
changed = True
elif self.state == 'present':
if str(volume_detail['size']) != str(self.size):
resize_volume = True
changed = True
if (volume_detail['is_online'] is not None) and (volume_detail['is_online'] != self.is_online):
changed = True
if self.is_online is False:
# Volume is online, but requested state is offline
pass
else:
# Volume is offline but requested state is online
pass
else:
if self.state == 'present':
changed = True
if changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if not volume_exists:
self.create_volume()
else:
if resize_volume:
self.resize_volume()
if (volume_detail['is_online'] is not None and
volume_detail['is_online'] != self.is_online):
self.change_volume_state()
# Ensure re-naming is the last change made.
if rename_volume:
self.rename_volume()
elif self.state == 'absent':
self.delete_volume()
self.module.exit_json(changed=changed)
def main():
v = NetAppCDOTVolume()
v.apply()
if __name__ == '__main__':
main()
|
msohailamjad/eudaq
|
refs/heads/master
|
legacy/python/example_generic_producer.py
|
14
|
#!/usr/bin/env python2
from PyEUDAQWrapper import * # load the ctypes wrapper
from StandardEvent_pb2 import StandardEvent
import random
from time import sleep
import numpy as np # for data handling
print "Starting PyProducer"
# create PyProducer instance
pp = PyProducer("GENERIC","tcp://localhost:44000")
i = 0 # counter variables for wait routines
maxwait = 100
waittime = .5
# wait for configure cmd from RunControl
while i<maxwait and not pp.Configuring:
sleep(waittime)
print "Waiting for configure for ",i*waittime," seconds"
i+=1
# check if configuration received
if pp.Configuring:
print "Ready to configure, received config string 'Parameter'=",pp.GetConfigParameter("Parameter")
# .... do your config stuff here ...
sleep(2)
pp.Configuring = True
# check for start of run cmd from RunControl
while i<maxwait and not pp.StartingRun:
sleep(waittime)
print "Waiting for run start for ",i*waittime," seconds"
i+=1
# check if we are starting:
if pp.StartingRun:
print "Ready to run!"
# ... prepare your system for the imminent run start
sleep(2)
pp.StartingRun = True # set status and send BORE
# starting to run
tluevent = 0
while not pp.Error and not pp.StoppingRun and not pp.Terminating:
event = StandardEvent()
plane = event.plane.add() #add one plane
plane.type = "OO"
plane.id = 0
plane.tluevent = tluevent
plane.xsize = 64
plane.ysize = 32
frame = plane.frame.add()
for _ in range(random.randint(1,16)):
pix = frame.pixel.add()
pix.x = random.randint(0,63)
pix.y = random.randint(0,31)
pix.val = 1
tluevent = tluevent + 1
data = np.fromstring(event.SerializeToString(), dtype=np.uint8)
print tluevent, data
pp.SendEvent(data) # send event off
sleep(2) # wait for a little while
# check if the run is stopping regularly
if pp.StoppingRun:
pp.StoppingRun=True # set status and send EORE
|
overtherain/scriptfile
|
refs/heads/master
|
software/googleAppEngine/lib/jinja2/jinja2/ext.py
|
114
|
# -*- coding: utf-8 -*-
"""
jinja2.ext
~~~~~~~~~~
Jinja extensions allow adding custom tags, similar to the way Django custom
tags work. By default, two example extensions exist: an i18n extension and a
cache extension.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from collections import deque
from jinja2 import nodes
from jinja2.defaults import *
from jinja2.environment import Environment
from jinja2.runtime import Undefined, concat
from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
from jinja2.utils import contextfunction, import_string, Markup, next
# The only really useful gettext functions for a Jinja template. Note
# that ugettext must be assigned to gettext, as Jinja doesn't support
# non-unicode strings.
GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
class ExtensionRegistry(type):
"""Gives the extension an unique identifier."""
def __new__(cls, name, bases, d):
rv = type.__new__(cls, name, bases, d)
rv.identifier = rv.__module__ + '.' + rv.__name__
return rv
class Extension(object):
"""Extensions can be used to add extra functionality to the Jinja template
system at the parser level. Custom extensions are bound to an environment
but may not store environment specific data on `self`. The reason for
this is that an extension can be bound to another environment (for
overlays) by creating a copy and reassigning the `environment` attribute.
As extensions are created by the environment they cannot accept any
arguments for configuration. One may want to work around that by using
a factory function, but that is not possible as extensions are identified
by their import name. The correct way to configure the extension is
    storing the configuration values on the environment.  Because the
    environment then ends up acting as central configuration storage,
    attribute names may clash, which is why extensions have to ensure that
    the names they choose for configuration are not too generic.  ``prefix``,
    for example, is a terrible name; ``fragment_cache_prefix`` on the other
    hand is a good name, as it includes the name of the extension (fragment
    cache).
"""
__metaclass__ = ExtensionRegistry
#: if this extension parses this is the list of tags it's listening to.
tags = set()
#: the priority of that extension. This is especially useful for
#: extensions that preprocess values. A lower value means higher
#: priority.
#:
#: .. versionadded:: 2.4
priority = 100
def __init__(self, environment):
self.environment = environment
def bind(self, environment):
"""Create a copy of this extension bound to another environment."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.environment = environment
return rv
def preprocess(self, source, name, filename=None):
"""This method is called before the actual lexing and can be used to
preprocess the source. The `filename` is optional. The return value
must be the preprocessed source.
"""
return source
def filter_stream(self, stream):
"""It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
to filter tokens returned. This method has to return an iterable of
:class:`~jinja2.lexer.Token`\s, but it doesn't have to return a
:class:`~jinja2.lexer.TokenStream`.
In the `ext` folder of the Jinja2 source distribution there is a file
called `inlinegettext.py` which implements a filter that utilizes this
method.
"""
return stream
def parse(self, parser):
"""If any of the :attr:`tags` matched this method is called with the
parser as first argument. The token the parser stream is pointing at
is the name token that matched. This method has to return one or a
list of multiple nodes.
"""
raise NotImplementedError()
def attr(self, name, lineno=None):
"""Return an attribute node for the current extension. This is useful
to pass constants on extensions to generated template code.
::
self.attr('_my_attribute', lineno=lineno)
"""
return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
def call_method(self, name, args=None, kwargs=None, dyn_args=None,
dyn_kwargs=None, lineno=None):
"""Call a method of the extension. This is a shortcut for
:meth:`attr` + :class:`jinja2.nodes.Call`.
"""
if args is None:
args = []
if kwargs is None:
kwargs = []
return nodes.Call(self.attr(name, lineno=lineno), args, kwargs,
dyn_args, dyn_kwargs, lineno=lineno)
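# --- Editor's sketch (not part of the original module): a minimal custom
# extension illustrating the parse()/call_method() pattern described above.
# The tag name 'upperblock' and the helper method are hypothetical.
class _UpperBlockExample(Extension):
    tags = set(['upperblock'])
    def parse(self, parser):
        # consume the tag token and remember its line number
        lineno = next(parser.stream).lineno
        # parse everything up to the matching {% endupperblock %}
        body = parser.parse_statements(['name:endupperblock'],
                                       drop_needle=True)
        # wrap the body in a call to the _upper helper below
        return nodes.CallBlock(self.call_method('_upper'), [], [], body,
                               lineno=lineno)
    def _upper(self, caller):
        # caller() renders the wrapped block; upper-case the result
        return caller().upper()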
@contextfunction
def _gettext_alias(__context, *args, **kwargs):
return __context.call(__context.resolve('gettext'), *args, **kwargs)
def _make_new_gettext(func):
@contextfunction
def gettext(__context, __string, **variables):
rv = __context.call(func, __string)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
return rv % variables
return gettext
def _make_new_ngettext(func):
@contextfunction
def ngettext(__context, __singular, __plural, __num, **variables):
variables.setdefault('num', __num)
rv = __context.call(func, __singular, __plural, __num)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
return rv % variables
return ngettext
class InternationalizationExtension(Extension):
"""This extension adds gettext support to Jinja2."""
tags = set(['trans'])
# TODO: the i18n extension is currently reevaluating values in a few
# situations. Take this example:
# {% trans count=something() %}{{ count }} foo{% pluralize
    # %}{{ count }} foos{% endtrans %}
# something is called twice here. One time for the gettext value and
# the other time for the n-parameter of the ngettext function.
def __init__(self, environment):
Extension.__init__(self, environment)
environment.globals['_'] = _gettext_alias
environment.extend(
install_gettext_translations=self._install,
install_null_translations=self._install_null,
install_gettext_callables=self._install_callables,
uninstall_gettext_translations=self._uninstall,
extract_translations=self._extract,
newstyle_gettext=False
)
def _install(self, translations, newstyle=None):
gettext = getattr(translations, 'ugettext', None)
if gettext is None:
gettext = translations.gettext
ngettext = getattr(translations, 'ungettext', None)
if ngettext is None:
ngettext = translations.ngettext
self._install_callables(gettext, ngettext, newstyle)
def _install_null(self, newstyle=None):
self._install_callables(
lambda x: x,
lambda s, p, n: (n != 1 and (p,) or (s,))[0],
newstyle
)
def _install_callables(self, gettext, ngettext, newstyle=None):
if newstyle is not None:
self.environment.newstyle_gettext = newstyle
if self.environment.newstyle_gettext:
gettext = _make_new_gettext(gettext)
ngettext = _make_new_ngettext(ngettext)
self.environment.globals.update(
gettext=gettext,
ngettext=ngettext
)
def _uninstall(self, translations):
for key in 'gettext', 'ngettext':
self.environment.globals.pop(key, None)
def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
if isinstance(source, basestring):
source = self.environment.parse(source)
return extract_from_ast(source, gettext_functions)
def parse(self, parser):
"""Parse a translatable tag."""
lineno = next(parser.stream).lineno
num_called_num = False
# find all the variables referenced. Additionally a variable can be
# defined in the body of the trans block too, but this is checked at
        # a later stage.
plural_expr = None
variables = {}
while parser.stream.current.type != 'block_end':
if variables:
parser.stream.expect('comma')
# skip colon for python compatibility
if parser.stream.skip_if('colon'):
break
name = parser.stream.expect('name')
if name.value in variables:
parser.fail('translatable variable %r defined twice.' %
name.value, name.lineno,
exc=TemplateAssertionError)
# expressions
if parser.stream.current.type == 'assign':
next(parser.stream)
variables[name.value] = var = parser.parse_expression()
else:
variables[name.value] = var = nodes.Name(name.value, 'load')
if plural_expr is None:
plural_expr = var
num_called_num = name.value == 'num'
parser.stream.expect('block_end')
plural = plural_names = None
have_plural = False
referenced = set()
# now parse until endtrans or pluralize
singular_names, singular = self._parse_block(parser, True)
if singular_names:
referenced.update(singular_names)
if plural_expr is None:
plural_expr = nodes.Name(singular_names[0], 'load')
num_called_num = singular_names[0] == 'num'
# if we have a pluralize block, we parse that too
if parser.stream.current.test('name:pluralize'):
have_plural = True
next(parser.stream)
if parser.stream.current.type != 'block_end':
name = parser.stream.expect('name')
if name.value not in variables:
parser.fail('unknown variable %r for pluralization' %
name.value, name.lineno,
exc=TemplateAssertionError)
plural_expr = variables[name.value]
num_called_num = name.value == 'num'
parser.stream.expect('block_end')
plural_names, plural = self._parse_block(parser, False)
next(parser.stream)
referenced.update(plural_names)
else:
next(parser.stream)
# register free names as simple name expressions
for var in referenced:
if var not in variables:
variables[var] = nodes.Name(var, 'load')
if not have_plural:
plural_expr = None
elif plural_expr is None:
parser.fail('pluralize without variables', lineno)
node = self._make_node(singular, plural, variables, plural_expr,
bool(referenced),
num_called_num and have_plural)
node.set_lineno(lineno)
return node
def _parse_block(self, parser, allow_pluralize):
"""Parse until the next block tag with a given name."""
referenced = []
buf = []
while 1:
if parser.stream.current.type == 'data':
buf.append(parser.stream.current.value.replace('%', '%%'))
next(parser.stream)
elif parser.stream.current.type == 'variable_begin':
next(parser.stream)
name = parser.stream.expect('name').value
referenced.append(name)
buf.append('%%(%s)s' % name)
parser.stream.expect('variable_end')
elif parser.stream.current.type == 'block_begin':
next(parser.stream)
if parser.stream.current.test('name:endtrans'):
break
elif parser.stream.current.test('name:pluralize'):
if allow_pluralize:
break
parser.fail('a translatable section can have only one '
'pluralize section')
parser.fail('control structures in translatable sections are '
'not allowed')
elif parser.stream.eos:
parser.fail('unclosed translation block')
else:
assert False, 'internal parser error'
return referenced, concat(buf)
def _make_node(self, singular, plural, variables, plural_expr,
vars_referenced, num_called_num):
"""Generates a useful node from the data provided."""
        # If no variables are referenced there is no need for the '%'
        # escaping applied for old style gettext invocations, so undo it.
if not vars_referenced and not self.environment.newstyle_gettext:
singular = singular.replace('%%', '%')
if plural:
plural = plural.replace('%%', '%')
# singular only:
if plural_expr is None:
gettext = nodes.Name('gettext', 'load')
node = nodes.Call(gettext, [nodes.Const(singular)],
[], None, None)
# singular and plural
else:
ngettext = nodes.Name('ngettext', 'load')
node = nodes.Call(ngettext, [
nodes.Const(singular),
nodes.Const(plural),
plural_expr
], [], None, None)
# in case newstyle gettext is used, the method is powerful
# enough to handle the variable expansion and autoescape
# handling itself
if self.environment.newstyle_gettext:
for key, value in variables.iteritems():
# the function adds that later anyways in case num was
# called num, so just skip it.
if num_called_num and key == 'num':
continue
node.kwargs.append(nodes.Keyword(key, value))
# otherwise do that here
else:
# mark the return value as safe if we are in an
# environment with autoescaping turned on
node = nodes.MarkSafeIfAutoescape(node)
if variables:
node = nodes.Mod(node, nodes.Dict([
nodes.Pair(nodes.Const(key), value)
for key, value in variables.items()
]))
return nodes.Output([node])
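# --- Editor's sketch (assumption, not original code): typical setup for the
# i18n extension defined above; the translations object is illustrative.
#
#     env = Environment(extensions=['jinja2.ext.i18n'])
#     import gettext
#     env.install_gettext_translations(gettext.NullTranslations())
#     print env.from_string('{% trans %}Hello{% endtrans %}').render()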
class ExprStmtExtension(Extension):
"""Adds a `do` tag to Jinja2 that works like the print statement just
that it doesn't print the return value.
"""
tags = set(['do'])
def parse(self, parser):
node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
node.node = parser.parse_tuple()
return node
class LoopControlExtension(Extension):
"""Adds break and continue to the template engine."""
tags = set(['break', 'continue'])
def parse(self, parser):
token = next(parser.stream)
if token.value == 'break':
return nodes.Break(lineno=token.lineno)
return nodes.Continue(lineno=token.lineno)
class WithExtension(Extension):
"""Adds support for a django-like with block."""
tags = set(['with'])
def parse(self, parser):
node = nodes.Scope(lineno=next(parser.stream).lineno)
assignments = []
while parser.stream.current.type != 'block_end':
lineno = parser.stream.current.lineno
if assignments:
parser.stream.expect('comma')
target = parser.parse_assign_target()
parser.stream.expect('assign')
expr = parser.parse_expression()
assignments.append(nodes.Assign(target, expr, lineno=lineno))
node.body = assignments + \
list(parser.parse_statements(('name:endwith',),
drop_needle=True))
return node
class AutoEscapeExtension(Extension):
"""Changes auto escape rules for a scope."""
tags = set(['autoescape'])
def parse(self, parser):
node = nodes.ScopedEvalContextModifier(lineno=next(parser.stream).lineno)
node.options = [
nodes.Keyword('autoescape', parser.parse_expression())
]
node.body = parser.parse_statements(('name:endautoescape',),
drop_needle=True)
return nodes.Scope([node])
def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
babel_style=True):
"""Extract localizable strings from the given template node. Per
default this function returns matches in babel style that means non string
parameters as well as keyword arguments are returned as `None`. This
allows Babel to figure out what you really meant if you are using
gettext functions that allow keyword arguments for placeholder expansion.
If you don't want that behavior set the `babel_style` parameter to `False`
which causes only strings to be returned and parameters are always stored
in tuples. As a consequence invalid gettext calls (calls without a single
string parameter or string parameters after non-string parameters) are
skipped.
This example explains the behavior:
>>> from jinja2 import Environment
>>> env = Environment()
>>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
>>> list(extract_from_ast(node))
[(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
>>> list(extract_from_ast(node, babel_style=False))
[(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]
For every string found this function yields a ``(lineno, function,
message)`` tuple, where:
* ``lineno`` is the number of the line on which the string was found,
* ``function`` is the name of the ``gettext`` function used (if the
string was extracted from embedded Python code), and
* ``message`` is the string itself (a ``unicode`` object, or a tuple
of ``unicode`` objects for functions with multiple string arguments).
    This extraction function operates on the AST and is therefore unable
    to extract any comments.  For comment support you have to use the babel
    extraction interface or extract comments yourself.
"""
for node in node.find_all(nodes.Call):
if not isinstance(node.node, nodes.Name) or \
node.node.name not in gettext_functions:
continue
strings = []
for arg in node.args:
if isinstance(arg, nodes.Const) and \
isinstance(arg.value, basestring):
strings.append(arg.value)
else:
strings.append(None)
for arg in node.kwargs:
strings.append(None)
if node.dyn_args is not None:
strings.append(None)
if node.dyn_kwargs is not None:
strings.append(None)
if not babel_style:
strings = tuple(x for x in strings if x is not None)
if not strings:
continue
else:
if len(strings) == 1:
strings = strings[0]
else:
strings = tuple(strings)
yield node.lineno, node.node.name, strings
class _CommentFinder(object):
"""Helper class to find comments in a token stream. Can only
find comments for gettext calls forwards. Once the comment
from line 4 is found, a comment for line 1 will not return a
usable value.
"""
def __init__(self, tokens, comment_tags):
self.tokens = tokens
self.comment_tags = comment_tags
self.offset = 0
self.last_lineno = 0
def find_backwards(self, offset):
try:
for _, token_type, token_value in \
reversed(self.tokens[self.offset:offset]):
if token_type in ('comment', 'linecomment'):
try:
prefix, comment = token_value.split(None, 1)
except ValueError:
continue
if prefix in self.comment_tags:
return [comment.rstrip()]
return []
finally:
self.offset = offset
def find_comments(self, lineno):
if not self.comment_tags or self.last_lineno > lineno:
return []
for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset:]):
if token_lineno > lineno:
return self.find_backwards(self.offset + idx)
return self.find_backwards(len(self.tokens))
def babel_extract(fileobj, keywords, comment_tags, options):
"""Babel extraction method for Jinja templates.
.. versionchanged:: 2.3
Basic support for translation comments was added. If `comment_tags`
is now set to a list of keywords for extraction, the extractor will
       try to find the best preceding comment that begins with one of the
keywords. For best results, make sure to not have more than one
gettext call in one line of code and the matching comment in the
same line or the line before.
.. versionchanged:: 2.5.1
The `newstyle_gettext` flag can be set to `True` to enable newstyle
gettext calls.
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param comment_tags: a list of translator tags to search for and include
in the results.
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
(comments will be empty currently)
"""
extensions = set()
for extension in options.get('extensions', '').split(','):
extension = extension.strip()
if not extension:
continue
extensions.add(import_string(extension))
if InternationalizationExtension not in extensions:
extensions.add(InternationalizationExtension)
def getbool(options, key, default=False):
        return options.get(key, str(default)).lower() in \
            ('1', 'on', 'yes', 'true')
environment = Environment(
options.get('block_start_string', BLOCK_START_STRING),
options.get('block_end_string', BLOCK_END_STRING),
options.get('variable_start_string', VARIABLE_START_STRING),
options.get('variable_end_string', VARIABLE_END_STRING),
options.get('comment_start_string', COMMENT_START_STRING),
options.get('comment_end_string', COMMENT_END_STRING),
options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
getbool(options, 'trim_blocks', TRIM_BLOCKS),
NEWLINE_SEQUENCE, frozenset(extensions),
cache_size=0,
auto_reload=False
)
if getbool(options, 'newstyle_gettext'):
environment.newstyle_gettext = True
source = fileobj.read().decode(options.get('encoding', 'utf-8'))
try:
node = environment.parse(source)
tokens = list(environment.lex(environment.preprocess(source)))
except TemplateSyntaxError, e:
# skip templates with syntax errors
return
finder = _CommentFinder(tokens, comment_tags)
for lineno, func, message in extract_from_ast(node, keywords):
yield lineno, func, message, finder.find_comments(lineno)
#: nicer import names
i18n = InternationalizationExtension
do = ExprStmtExtension
loopcontrols = LoopControlExtension
with_ = WithExtension
autoescape = AutoEscapeExtension
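# --- Editor's sketch (assumption): loading the bundled extensions via the
# short names defined above; the template is illustrative.
#
#     env = Environment(extensions=['jinja2.ext.do', 'jinja2.ext.loopcontrols'])
#     tmpl = env.from_string(
#         '{% for i in range(5) %}{% if i == 3 %}{% break %}{% endif %}'
#         '{{ i }}{% endfor %}')
#     assert tmpl.render() == '012'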
|
anthonydillon/horizon
|
refs/heads/master
|
openstack_dashboard/test/integration_tests/tests/decorators.py
|
23
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import functools
import inspect
import testtools
from openstack_dashboard.test.integration_tests import config
def _is_test_method_name(method):
return method.startswith('test_')
def _is_test_fixture(method):
return method in ['setUp', 'tearDown']
def _is_test_cls(cls):
return cls.__name__.startswith('Test')
def _mark_method_skipped(meth, reason):
"""Mark method as skipped by replacing the actual method with wrapper
that raises the testtools.testcase.TestSkipped exception.
"""
@functools.wraps(meth)
def wrapper(*args, **kwargs):
raise testtools.testcase.TestSkipped(reason)
return wrapper
def _mark_class_skipped(cls, reason):
"""Mark every test method of the class as skipped."""
tests = [attr for attr in dir(cls) if _is_test_method_name(attr) or
_is_test_fixture(attr)]
for test in tests:
method = getattr(cls, test)
if callable(method):
setattr(cls, test, _mark_method_skipped(method, reason))
return cls
NOT_TEST_OBJECT_ERROR_MSG = "Decorator can be applied only on test" \
" classes and test methods."
def services_required(*req_services):
"""Decorator for marking test's service requirements,
if requirements are not met in the configuration file
test is marked as skipped.
Usage:
from openstack_dashboard.test.integration_tests.tests import decorators
@decorators.services_required("sahara")
class TestLogin(helpers.BaseTestCase):
.
.
.
from openstack_dashboard.test.integration_tests.tests import decorators
class TestLogin(helpers.BaseTestCase):
@decorators.services_required("sahara")
def test_login(self):
login_pg = loginpage.LoginPage(self.driver, self.conf)
.
.
.
"""
def actual_decoration(obj):
# make sure that we can decorate method and classes as well
if inspect.isclass(obj):
if not _is_test_cls(obj):
raise ValueError(NOT_TEST_OBJECT_ERROR_MSG)
skip_method = _mark_class_skipped
else:
if not _is_test_method_name(obj.__name__):
raise ValueError(NOT_TEST_OBJECT_ERROR_MSG)
skip_method = _mark_method_skipped
# get available services from configuration
avail_services = config.get_config().service_available
for req_service in req_services:
if not getattr(avail_services, req_service, False):
obj = skip_method(obj, "%s service is required for this test"
" to work properly." % req_service)
break
return obj
return actual_decoration
def skip_because(**kwargs):
"""Decorator for skipping tests hitting known bugs
Usage:
from openstack_dashboard.test.integration_tests.tests import decorators
class TestDashboardHelp(helpers.TestCase):
@decorators.skip_because(bugs=["1234567"])
def test_dashboard_help_redirection(self):
.
.
.
"""
def actual_decoration(obj):
if inspect.isclass(obj):
if not _is_test_cls(obj):
raise ValueError(NOT_TEST_OBJECT_ERROR_MSG)
skip_method = _mark_class_skipped
else:
if not _is_test_method_name(obj.__name__):
raise ValueError(NOT_TEST_OBJECT_ERROR_MSG)
skip_method = _mark_method_skipped
bugs = kwargs.get("bugs")
if bugs and isinstance(bugs, collections.Iterable):
for bug in bugs:
if not bug.isdigit():
raise ValueError("bug must be a valid bug number")
obj = skip_method(obj, "Skipped until Bugs: %s are resolved." %
", ".join([bug for bug in bugs]))
return obj
return actual_decoration
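# --- Editor's sketch (not part of the original module): a quick
# demonstration of how the skip wrapper behaves when invoked.
if __name__ == '__main__':
    def sample_test():
        return 42
    skipped = _mark_method_skipped(sample_test, "demo: service unavailable")
    try:
        skipped()
    except testtools.testcase.TestSkipped as exc:
        print('skipped with reason: %s' % exc)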
|
ngvoice/android-client
|
refs/heads/master
|
phone/jni/pjsip/sources/tests/pjsua/scripts-sendto/120_sdp_with_video_dynamic_1.py
|
59
|
# $Id: 120_sdp_with_video_dynamic_1.py 2081 2008-06-27 21:59:15Z bennylp $
import inc_sip as sip
import inc_sdp as sdp
# Video uses dynamic payload type
sdp = \
"""
v=0
o=- 0 0 IN IP4 127.0.0.1
s=-
c=IN IP4 127.0.0.1
t=0 0
m=audio 5000 RTP/AVP 0
m=video 4000 RTP/AVP 100
a=rtpmap:100 myvideo/80000
"""
pjsua_args = "--null-audio --auto-answer 200"
extra_headers = ""
include = ["Content-Type: application/sdp", # response must include SDP
"m=audio [1-9]+[0-9]* RTP/AVP[\\s\\S]+m=video 0 RTP/AVP"
]
exclude = []
sendto_cfg = sip.SendtoCfg("Mixed audio and video", pjsua_args, sdp, 200,
extra_headers=extra_headers,
resp_inc=include, resp_exc=exclude)
|
TheComet93/ontology
|
refs/heads/master
|
tests/gmock/gtest/test/gtest_catch_exceptions_test.py
|
2139
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's exception catching behavior.
This script invokes gtest_catch_exceptions_test_ and
gtest_catch_exceptions_ex_test_ (programs written with
Google Test) and verifies their output.
"""
__author__ = 'vladl@google.com (Vlad Losev)'
import os
import gtest_test_utils
# Constants.
FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'
# Path to the gtest_catch_exceptions_ex_test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_ex_test_')
# Path to the gtest_catch_exceptions_test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_no_ex_test_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
TEST_LIST = gtest_test_utils.Subprocess(
[EXE_PATH, LIST_TESTS_FLAG], env=environ).output
SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST
if SUPPORTS_SEH_EXCEPTIONS:
BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH], env=environ).output
EX_BINARY_OUTPUT = gtest_test_utils.Subprocess(
[EX_EXE_PATH], env=environ).output
# The tests.
if SUPPORTS_SEH_EXCEPTIONS:
# pylint:disable-msg=C6302
class CatchSehExceptionsTest(gtest_test_utils.TestCase):
"""Tests exception-catching behavior."""
def TestSehExceptions(self, test_output):
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s constructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s destructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUpTestCase()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDownTestCase()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUp()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDown()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in the test body'
in test_output)
def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
self.TestSehExceptions(EX_BINARY_OUTPUT)
def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
self.TestSehExceptions(BINARY_OUTPUT)
class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
"""Tests C++ exception-catching behavior.
Tests in this test case verify that:
* C++ exceptions are caught and logged as C++ (not SEH) exceptions
    * Exceptions thrown affect the remainder of the test workflow in the
      expected manner.
"""
def testCatchesCxxExceptionsInFixtureConstructor(self):
self.assert_('C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s constructor'
in EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInConstructorTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
if ('CxxExceptionInDestructorTest.ThrowsExceptionInDestructor' in
EX_BINARY_OUTPUT):
def testCatchesCxxExceptionsInFixtureDestructor(self):
self.assert_('C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s destructor'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUpTestCase(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in SetUpTestCase()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest constructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest test body '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTearDownTestCase(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in TearDownTestCase()'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUp(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in SetUp()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInSetUpTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
def testCatchesCxxExceptionsInTearDown(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in TearDown()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTearDownTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTestBody(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in the test body'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesNonStdCxxExceptions(self):
self.assert_('Unknown C++ exception thrown in the test body'
in EX_BINARY_OUTPUT)
def testUnhandledCxxExceptionsAbortTheProgram(self):
# Filters out SEH exception tests on Windows. Unhandled SEH exceptions
# cause tests to show pop-up windows there.
    FILTER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
# By default, Google Test doesn't catch the exceptions.
uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
[EX_EXE_PATH,
NO_CATCH_EXCEPTIONS_FLAG,
         FILTER_OUT_SEH_TESTS_FLAG],
env=environ).output
self.assert_('Unhandled C++ exception terminating the program'
in uncaught_exceptions_ex_binary_output)
self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output)
if __name__ == '__main__':
gtest_test_utils.Main()
|
aimas/TuniErp-8.0
|
refs/heads/master
|
addons/account_followup/report/account_followup_print.py
|
222
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from collections import defaultdict
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp.report import report_sxw
class report_rappel(report_sxw.rml_parse):
_name = "account_followup.report.rappel"
def __init__(self, cr, uid, name, context=None):
super(report_rappel, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'ids_to_objects': self._ids_to_objects,
'getLines': self._lines_get,
'get_text': self._get_text
})
def _ids_to_objects(self, ids):
all_lines = []
for line in self.pool['account_followup.stat.by.partner'].browse(self.cr, self.uid, ids):
if line not in all_lines:
all_lines.append(line)
return all_lines
def _lines_get(self, stat_by_partner_line):
return self._lines_get_with_partner(stat_by_partner_line.partner_id, stat_by_partner_line.company_id.id)
def _lines_get_with_partner(self, partner, company_id):
moveline_obj = self.pool['account.move.line']
moveline_ids = moveline_obj.search(self.cr, self.uid, [
('partner_id', '=', partner.id),
('account_id.type', '=', 'receivable'),
('reconcile_id', '=', False),
('state', '!=', 'draft'),
('company_id', '=', company_id),
'|', ('date_maturity', '=', False), ('date_maturity', '<=', fields.date.context_today(self, self.cr, self.uid)),
])
# lines_per_currency = {currency: [line data, ...], ...}
lines_per_currency = defaultdict(list)
for line in moveline_obj.browse(self.cr, self.uid, moveline_ids):
currency = line.currency_id or line.company_id.currency_id
line_data = {
'name': line.move_id.name,
'ref': line.ref,
'date': line.date,
'date_maturity': line.date_maturity,
'balance': line.amount_currency if currency != line.company_id.currency_id else line.debit - line.credit,
'blocked': line.blocked,
'currency_id': currency,
}
lines_per_currency[currency].append(line_data)
return [{'line': lines, 'currency': currency} for currency, lines in lines_per_currency.items()]
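    # Editor's note (assumption, not original code): the structure returned
    # above looks like
    #     [{'line': [<line_data dict>, ...], 'currency': <currency record>}, ...]
    # i.e. one entry per currency, which the followup report template
    # iterates over.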
def _get_text(self, stat_line, followup_id, context=None):
context = dict(context or {}, lang=stat_line.partner_id.lang)
fp_obj = self.pool['account_followup.followup']
fp_line = fp_obj.browse(self.cr, self.uid, followup_id, context=context).followup_line
if not fp_line:
            raise osv.except_osv(_('Error!'), _("The followup plan defined for the current company does not have any followup action."))
#the default text will be the first fp_line in the sequence with a description.
default_text = ''
li_delay = []
for line in fp_line:
if not default_text and line.description:
default_text = line.description
li_delay.append(line.delay)
li_delay.sort(reverse=True)
a = {}
#look into the lines of the partner that already have a followup level, and take the description of the higher level for which it is available
        partner_line_ids = self.pool['account.move.line'].search(self.cr, self.uid, [
            ('partner_id', '=', stat_line.partner_id.id),
            ('reconcile_id', '=', False),
            ('company_id', '=', stat_line.company_id.id),
            ('blocked', '=', False),
            ('state', '!=', 'draft'),
            ('debit', '!=', False),
            ('account_id.type', '=', 'receivable'),
            ('followup_line_id', '!=', False)])
partner_max_delay = 0
partner_max_text = ''
for i in self.pool['account.move.line'].browse(self.cr, self.uid, partner_line_ids, context=context):
if i.followup_line_id.delay > partner_max_delay and i.followup_line_id.description:
partner_max_delay = i.followup_line_id.delay
partner_max_text = i.followup_line_id.description
text = partner_max_delay and partner_max_text or default_text
if text:
lang_obj = self.pool['res.lang']
lang_ids = lang_obj.search(self.cr, self.uid, [('code', '=', stat_line.partner_id.lang)], context=context)
date_format = lang_ids and lang_obj.browse(self.cr, self.uid, lang_ids[0], context=context).date_format or '%Y-%m-%d'
text = text % {
'partner_name': stat_line.partner_id.name,
'date': time.strftime(date_format),
'company_name': stat_line.company_id.name,
'user_signature': self.pool['res.users'].browse(self.cr, self.uid, self.uid, context).signature or '',
}
return text
class report_followup(osv.AbstractModel):
_name = 'report.account_followup.report_followup'
_inherit = 'report.abstract_report'
_template = 'account_followup.report_followup'
_wrapped_report_class = report_rappel
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
plotly/python-api
|
refs/heads/master
|
packages/python/plotly/plotly/validators/histogram2d/_hovertemplate.py
|
1
|
import _plotly_utils.basevalidators
class HovertemplateValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="hovertemplate", parent_name="histogram2d", **kwargs
):
super(HovertemplateValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
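# --- Editor's sketch (assumption, not part of the generated module):
# exercising the validator directly; the template strings are arbitrary.
#
#     v = HovertemplateValidator()
#     v.validate_coerce("x: %{x}<br>y: %{y}")    # returns the coerced string
#     v.validate_coerce(["a: %{x}", "b: %{y}"])  # lists allowed (array_ok=True)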
|
hungtt57/matchmaker
|
refs/heads/master
|
lib/python2.7/site-packages/idna/intranges.py
|
154
|
"""
Given a list of integers, made up of (hopefully) a small number of long runs
of consecutive integers, compute a representation of the form
((start1, end1), (start2, end2) ...). Then answer the question "was x present
in the original list?" in time O(log(# runs)).
"""
import bisect
def intranges_from_list(list_):
"""Represent a list of integers as a sequence of ranges:
((start_0, end_0), (start_1, end_1), ...), such that the original
integers are exactly those x such that start_i <= x < end_i for some i.
"""
sorted_list = sorted(list_)
ranges = []
last_write = -1
for i in range(len(sorted_list)):
if i+1 < len(sorted_list):
if sorted_list[i] == sorted_list[i+1]-1:
continue
current_range = sorted_list[last_write+1:i+1]
range_tuple = (current_range[0], current_range[-1] + 1)
ranges.append(range_tuple)
last_write = i
return tuple(ranges)
def intranges_contain(int_, ranges):
"""Determine if `int_` falls into one of the ranges in `ranges`."""
tuple_ = (int_, int_)
pos = bisect.bisect_left(ranges, tuple_)
# we could be immediately ahead of a tuple (start, end)
# with start < int_ <= end
if pos > 0:
left, right = ranges[pos-1]
if left <= int_ < right:
return True
# or we could be immediately behind a tuple (int_, end)
if pos < len(ranges):
left, _ = ranges[pos]
if left == int_:
return True
return False
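# --- Editor's sketch (not part of the original module): a self-contained
# demonstration of the two functions above.
if __name__ == '__main__':
    ranges = intranges_from_list([1, 2, 3, 10, 11, 12, 40])
    assert ranges == ((1, 4), (10, 13), (40, 41))
    assert intranges_contain(11, ranges)
    assert not intranges_contain(5, ranges)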
|
fritsvanveen/QGIS
|
refs/heads/master
|
python/plugins/processing/algs/qgis/ReverseLineDirection.py
|
5
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
ReverseLineDirection.py
-----------------------
Date : November 2015
Copyright : (C) 2015 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'November 2015'
__copyright__ = '(C) 2015, Nyall Dawson'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import QgsGeometry, QgsFeature
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import ParameterVector
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
class ReverseLineDirection(GeoAlgorithm):
INPUT_LAYER = 'INPUT_LAYER'
OUTPUT_LAYER = 'OUTPUT_LAYER'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Reverse line direction')
self.group, self.i18n_group = self.trAlgorithm('Vector geometry tools')
self.addParameter(ParameterVector(self.INPUT_LAYER,
self.tr('Input layer'), [dataobjects.TYPE_VECTOR_LINE]))
self.addOutput(OutputVector(self.OUTPUT_LAYER, self.tr('Reversed'), datatype=[dataobjects.TYPE_VECTOR_LINE]))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT_LAYER))
writer = self.getOutputFromName(
self.OUTPUT_LAYER).getVectorWriter(
layer.fields().toList(),
layer.wkbType(),
layer.crs())
outFeat = QgsFeature()
features = vector.features(layer)
total = 100.0 / len(features)
for current, inFeat in enumerate(features):
inGeom = inFeat.geometry()
attrs = inFeat.attributes()
outGeom = None
if not inGeom.isEmpty():
reversedLine = inGeom.geometry().reversed()
if not reversedLine:
raise GeoAlgorithmExecutionException(
self.tr('Error reversing line'))
outGeom = QgsGeometry(reversedLine)
outFeat.setGeometry(outGeom)
outFeat.setAttributes(attrs)
writer.addFeature(outFeat)
progress.setPercentage(int(current * total))
del writer
|
amenonsen/ansible
|
refs/heads/devel
|
lib/ansible/plugins/inventory/kubevirt.py
|
37
|
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: kubevirt
plugin_type: inventory
author:
- KubeVirt Team (@kubevirt)
version_added: "2.8"
short_description: KubeVirt inventory source
extends_documentation_fragment:
- inventory_cache
- constructed
description:
- Fetch running VirtualMachines for one or more namespaces.
- Groups by namespace, namespace_vms and labels.
- Uses kubevirt.(yml|yaml) YAML configuration file to set parameter values.
options:
plugin:
description: token that ensures this is a source file for the 'kubevirt' plugin.
required: True
choices: ['kubevirt']
type: str
host_format:
description:
- Specify the format of the host in the inventory group.
default: "{namespace}-{name}-{uid}"
connections:
type: list
description:
- Optional list of cluster connection settings. If no connections are provided, the default
I(~/.kube/config) and active context will be used, and objects will be returned for all namespaces
the active user is authorized to access.
suboptions:
name:
description:
- Optional name to assign to the cluster. If not provided, a name is constructed from the server
and port.
type: str
kubeconfig:
description:
- Path to an existing Kubernetes config file. If not provided, and no other connection
options are provided, the OpenShift client will attempt to load the default
          configuration file from I(~/.kube/config). Can also be specified via K8S_AUTH_KUBECONFIG
environment variable.
type: str
context:
description:
- The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment
variable.
type: str
host:
description:
- Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
type: str
api_key:
description:
- Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment
variable.
type: str
username:
description:
- Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME
environment variable.
type: str
password:
description:
- Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD
environment variable.
type: str
cert_file:
description:
- Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE
environment variable.
type: str
key_file:
description:
        - Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE
environment variable.
type: str
ssl_ca_cert:
description:
- Path to a CA certificate used to authenticate with the API. Can also be specified via
K8S_AUTH_SSL_CA_CERT environment variable.
type: str
verify_ssl:
description:
- "Whether or not to verify the API server's SSL certificates. Can also be specified via
K8S_AUTH_VERIFY_SSL environment variable."
type: bool
namespaces:
description:
- List of namespaces. If not specified, will fetch all virtual machines for all namespaces user is authorized
to access.
type: list
network_name:
description:
- In case of multiple network attached to virtual machine, define which interface should be returned as primary IP
address.
type: str
api_version:
description:
- "Specify the KubeVirt API version."
type: str
annotation_variable:
description:
- "Specify the name of the annotation which provides data, which should be used as inventory host variables."
- "Note, that the value in ansible annotations should be json."
type: str
default: 'ansible'
requirements:
- "openshift >= 0.6"
- "PyYAML >= 3.11"
'''
EXAMPLES = '''
# File must be named kubevirt.yaml or kubevirt.yml
# Authenticate with token, and return all virtual machines for all namespaces
plugin: kubevirt
connections:
- host: https://kubevirt.io
   api_key: xxxxxxxxxxxxxxxx
   verify_ssl: false
# Use default config (~/.kube/config) file and active context, and return vms with interfaces
# connected to network myovsnetwork and from namespace vms
plugin: kubevirt
connections:
- namespaces:
- vms
network_name: myovsnetwork
'''
import json
from ansible.plugins.inventory.k8s import K8sInventoryException, InventoryModule as K8sInventoryModule, format_dynamic_api_exc
try:
from openshift.dynamic.exceptions import DynamicApiError
except ImportError:
pass
API_VERSION = 'kubevirt.io/v1alpha3'
class InventoryModule(K8sInventoryModule):
NAME = 'kubevirt'
def setup(self, config_data, cache, cache_key):
self.config_data = config_data
super(InventoryModule, self).setup(config_data, cache, cache_key)
def fetch_objects(self, connections):
client = self.get_api_client()
vm_format = self.config_data.get('host_format', '{namespace}-{name}-{uid}')
if connections:
for connection in connections:
client = self.get_api_client(**connection)
name = connection.get('name', self.get_default_host_name(client.configuration.host))
if connection.get('namespaces'):
namespaces = connection['namespaces']
else:
namespaces = self.get_available_namespaces(client)
interface_name = connection.get('network_name')
api_version = connection.get('api_version', API_VERSION)
annotation_variable = connection.get('annotation_variable', 'ansible')
for namespace in namespaces:
self.get_vms_for_namespace(client, name, namespace, vm_format, interface_name, api_version, annotation_variable)
else:
name = self.get_default_host_name(client.configuration.host)
namespaces = self.get_available_namespaces(client)
for namespace in namespaces:
                # api_version and annotation_variable are only bound in the
                # connections branch above; fall back to the module defaults.
                self.get_vms_for_namespace(client, name, namespace, vm_format, None, API_VERSION, 'ansible')
def get_vms_for_namespace(self, client, name, namespace, name_format, interface_name=None, api_version=None, annotation_variable=None):
v1_vm = client.resources.get(api_version=api_version, kind='VirtualMachineInstance')
try:
obj = v1_vm.get(namespace=namespace)
except DynamicApiError as exc:
self.display.debug(exc)
raise K8sInventoryException('Error fetching Virtual Machines list: %s' % format_dynamic_api_exc(exc))
namespace_group = 'namespace_{0}'.format(namespace)
namespace_vms_group = '{0}_vms'.format(namespace_group)
name = self._sanitize_group_name(name)
namespace_group = self._sanitize_group_name(namespace_group)
namespace_vms_group = self._sanitize_group_name(namespace_vms_group)
self.inventory.add_group(name)
self.inventory.add_group(namespace_group)
self.inventory.add_child(name, namespace_group)
self.inventory.add_group(namespace_vms_group)
self.inventory.add_child(namespace_group, namespace_vms_group)
for vm in obj.items:
if not (vm.status and vm.status.interfaces):
continue
# Find interface by its name:
if interface_name is None:
interface = vm.status.interfaces[0]
else:
interface = next(
(i for i in vm.status.interfaces if i.name == interface_name),
None
)
# If interface is not found or IP address is not reported skip this VM:
if interface is None or interface.ipAddress is None:
continue
vm_name = name_format.format(namespace=vm.metadata.namespace, name=vm.metadata.name, uid=vm.metadata.uid)
vm_ip = interface.ipAddress
vm_annotations = {} if not vm.metadata.annotations else dict(vm.metadata.annotations)
self.inventory.add_host(vm_name)
if vm.metadata.labels:
# create a group for each label_value
for key, value in vm.metadata.labels:
group_name = 'label_{0}_{1}'.format(key, value)
group_name = self._sanitize_group_name(group_name)
self.inventory.add_group(group_name)
self.inventory.add_child(group_name, vm_name)
vm_labels = dict(vm.metadata.labels)
else:
vm_labels = {}
self.inventory.add_child(namespace_vms_group, vm_name)
# add hostvars
self.inventory.set_variable(vm_name, 'ansible_host', vm_ip)
self.inventory.set_variable(vm_name, 'labels', vm_labels)
self.inventory.set_variable(vm_name, 'annotations', vm_annotations)
self.inventory.set_variable(vm_name, 'object_type', 'vm')
self.inventory.set_variable(vm_name, 'resource_version', vm.metadata.resourceVersion)
self.inventory.set_variable(vm_name, 'uid', vm.metadata.uid)
# Add all variables which are listed in 'ansible' annotation:
annotations_data = json.loads(vm_annotations.get(annotation_variable, "{}"))
for k, v in annotations_data.items():
self.inventory.set_variable(vm_name, k, v)
def verify_file(self, path):
if super(InventoryModule, self).verify_file(path):
if path.endswith(('kubevirt.yml', 'kubevirt.yaml')):
return True
return False
|
MjAbuz/watchdog
|
refs/heads/master
|
vendor/xappy/searchconnection.py
|
4
|
#!/usr/bin/env python
#
# Copyright (C) 2007 Lemur Consulting Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
r"""searchconnection.py: A connection to the search engine for searching.
"""
__docformat__ = "restructuredtext en"
import _checkxapian
import os as _os
import cPickle as _cPickle
import math
import xapian as _xapian
from datastructures import *
from fieldactions import *
import fieldmappings as _fieldmappings
import highlight as _highlight
import errors as _errors
import indexerconnection as _indexerconnection
import re as _re
from replaylog import log as _log
class SearchResult(ProcessedDocument):
"""A result from a search.
As well as being a ProcessedDocument representing the document in the
database, the result has several members which may be used to get
information about how well the document matches the search:
- `rank`: The rank of the document in the search results, starting at 0
(ie, 0 is the "top" result, 1 is the second result, etc).
- `weight`: A floating point number indicating the weight of the result
document. The value is only meaningful relative to other results for a
given search - a different search, or the same search with a different
database, may give an entirely different scale to the weights. This
should not usually be displayed to users, but may be useful if trying to
perform advanced reweighting operations on search results.
- `percent`: A percentage value for the weight of a document. This is
just a rescaled form of the `weight` member. It doesn't represent any
kind of probability value; the only real meaning of the numbers is that,
within a single set of results, a document with a higher percentage
corresponds to a better match. Because the percentage doesn't really
represent a probability, or a confidence value, it is probably unhelpful
       to display it to most users, since they tend to place too much emphasis
on its meaning. However, it is included because it may be useful
occasionally.
"""
def __init__(self, msetitem, results):
ProcessedDocument.__init__(self, results._fieldmappings, msetitem.document)
self.rank = msetitem.rank
self.weight = msetitem.weight
self.percent = msetitem.percent
self._results = results
def _get_language(self, field):
"""Get the language that should be used for a given field.
Raises a KeyError if the field is not known.
"""
actions = self._results._conn._field_actions[field]._actions
for action, kwargslist in actions.iteritems():
if action == FieldActions.INDEX_FREETEXT:
for kwargs in kwargslist:
try:
return kwargs['language']
except KeyError:
pass
return 'none'
def summarise(self, field, maxlen=600, hl=('<b>', '</b>'), query=None):
"""Return a summarised version of the field specified.
This will return a summary of the contents of the field stored in the
search result, with words which match the query highlighted.
The maximum length of the summary (in characters) may be set using the
maxlen parameter.
The return value will be a string holding the summary, with
highlighting applied. If there are multiple instances of the field in
the document, the instances will be joined with a newline character.
To turn off highlighting, set hl to None. Each highlight will consist
of the first entry in the `hl` list being placed before the word, and
the second entry in the `hl` list being placed after the word.
Any XML or HTML style markup tags in the field will be stripped before
the summarisation algorithm is applied.
If `query` is supplied, it should contain a Query object, as returned
from SearchConnection.query_parse() or related methods, which will be
used as the basis of the summarisation and highlighting rather than the
query which was used for the search.
Raises KeyError if the field is not known.
"""
highlighter = _highlight.Highlighter(language_code=self._get_language(field))
        field = self.data[field]
        text = '\n'.join(field)
if query is None:
query = self._results._query
return highlighter.makeSample(text, query, maxlen, hl)
def highlight(self, field, hl=('<b>', '</b>'), strip_tags=False, query=None):
"""Return a highlighted version of the field specified.
This will return all the contents of the field stored in the search
result, with words which match the query highlighted.
The return value will be a list of strings (corresponding to the list
of strings which is the raw field data).
Each highlight will consist of the first entry in the `hl` list being
placed before the word, and the second entry in the `hl` list being
placed after the word.
If `strip_tags` is True, any XML or HTML style markup tags in the field
will be stripped before highlighting is applied.
If `query` is supplied, it should contain a Query object, as returned
from SearchConnection.query_parse() or related methods, which will be
used as the basis of the summarisation and highlighting rather than the
query which was used for the search.
Raises KeyError if the field is not known.
"""
highlighter = _highlight.Highlighter(language_code=self._get_language(field))
field = self.data[field]
results = []
if query is None:
query = self._results._query
for text in field:
results.append(highlighter.highlight(text, query, hl, strip_tags))
return results
def __repr__(self):
return ('<SearchResult(rank=%d, id=%r, data=%r)>' %
(self.rank, self.id, self.data))
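# --- Editor's sketch (assumption): typical consumption of SearchResult
# objects; the index path and field name are illustrative.
#
#     conn = SearchConnection('path/to/index')
#     results = conn.search(conn.query_parse('hello'), 0, 10)
#     for hit in results:
#         print hit.rank, hit.percent, hit.summarise('text')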
class SearchResultIter(object):
"""An iterator over a set of results from a search.
"""
def __init__(self, results, order):
self._results = results
self._order = order
if self._order is None:
self._iter = iter(results._mset)
else:
self._iter = iter(self._order)
def next(self):
if self._order is None:
msetitem = self._iter.next()
else:
index = self._iter.next()
msetitem = self._results._mset.get_hit(index)
return SearchResult(msetitem, self._results)
def _get_significant_digits(value, lower, upper):
"""Get the significant digits of value which are constrained by the
(inclusive) lower and upper bounds.
If there are no significant digits which are definitely within the
bounds, exactly one significant digit will be returned in the result.
>>> _get_significant_digits(15,15,15)
15
>>> _get_significant_digits(15,15,17)
20
>>> _get_significant_digits(4777,208,6000)
5000
>>> _get_significant_digits(4777,4755,4790)
4800
>>> _get_significant_digits(4707,4695,4710)
4700
>>> _get_significant_digits(4719,4717,4727)
4720
>>> _get_significant_digits(0,0,0)
0
>>> _get_significant_digits(9,9,10)
9
>>> _get_significant_digits(9,9,100)
9
"""
assert(lower <= value)
assert(value <= upper)
diff = upper - lower
# Get the first power of 10 greater than the difference.
# This corresponds to the magnitude of the smallest significant digit.
if diff == 0:
pos_pow_10 = 1
else:
pos_pow_10 = int(10 ** math.ceil(math.log10(diff)))
# Special case for situation where we don't have any significant digits:
# get the magnitude of the most significant digit in value.
if pos_pow_10 > value:
if value == 0:
pos_pow_10 = 1
else:
pos_pow_10 = int(10 ** math.floor(math.log10(value)))
# Return the value, rounded to the nearest multiple of pos_pow_10
return ((value + pos_pow_10 // 2) // pos_pow_10) * pos_pow_10
class SearchResults(object):
"""A set of results of a search.
"""
def __init__(self, conn, enq, query, mset, fieldmappings, tagspy,
tagfields, facetspy, facetfields, facethierarchy,
facetassocs):
self._conn = conn
self._enq = enq
self._query = query
self._mset = mset
self._mset_order = None
self._fieldmappings = fieldmappings
self._tagspy = tagspy
if tagfields is None:
self._tagfields = None
else:
self._tagfields = set(tagfields)
self._facetspy = facetspy
self._facetfields = facetfields
self._facethierarchy = facethierarchy
self._facetassocs = facetassocs
self._numeric_ranges_built = {}
def _cluster(self, num_clusters, maxdocs, fields=None):
"""Cluster results based on similarity.
Note: this method is experimental, and will probably disappear or
change in the future.
The number of clusters is specified by num_clusters: unless there are
too few results, there will be exactly this number of clusters in the
result.
"""
clusterer = _xapian.ClusterSingleLink()
xapclusters = _xapian.ClusterAssignments()
docsim = _xapian.DocSimCosine()
source = _xapian.MSetDocumentSource(self._mset, maxdocs)
if fields is None:
clusterer.cluster(self._conn._index, xapclusters, docsim, source, num_clusters)
else:
decider = self._make_expand_decider(fields)
clusterer.cluster(self._conn._index, xapclusters, docsim, source, decider, num_clusters)
newid = 0
idmap = {}
clusters = {}
for item in self._mset:
docid = item.docid
clusterid = xapclusters.cluster(docid)
if clusterid not in idmap:
idmap[clusterid] = newid
newid += 1
clusterid = idmap[clusterid]
if clusterid not in clusters:
clusters[clusterid] = []
clusters[clusterid].append(item.rank)
return clusters
def _reorder_by_clusters(self, clusters):
"""Reorder the mset based on some clusters.
"""
if self.startrank != 0:
raise _errors.SearchError("startrank must be zero to reorder by clusters")
tophits = []
nottophits = []
clusterstarts = dict(((c[0], None) for c in clusters.itervalues()))
for i in xrange(self.endrank):
if i in clusterstarts:
tophits.append(i)
else:
nottophits.append(i)
self._mset_order = tophits
self._mset_order.extend(nottophits)
def _make_expand_decider(self, fields):
"""Make an expand decider which accepts only terms in the specified
field.
"""
prefixes = {}
if isinstance(fields, basestring):
fields = [fields]
for field in fields:
try:
actions = self._conn._field_actions[field]._actions
except KeyError:
continue
for action, kwargslist in actions.iteritems():
if action == FieldActions.INDEX_FREETEXT:
prefix = self._conn._field_mappings.get_prefix(field)
prefixes[prefix] = None
prefixes['Z' + prefix] = None
if action in (FieldActions.INDEX_EXACT,
FieldActions.TAG,
FieldActions.FACET,):
prefix = self._conn._field_mappings.get_prefix(field)
prefixes[prefix] = None
prefix_re = _re.compile('|'.join([_re.escape(x) + '[^A-Z]' for x in prefixes.keys()]))
class decider(_xapian.ExpandDecider):
def __call__(self, term):
return prefix_re.match(term) is not None
return decider()
def _reorder_by_similarity(self, count, maxcount, max_similarity,
fields=None):
"""Reorder results based on similarity.
The top `count` documents will be chosen such that they are relatively
dissimilar. `maxcount` documents will be considered for moving around,
and `max_similarity` is a value between 0 and 1 indicating the maximum
similarity to the previous document before a document is moved down the
result set.
Note: this method is experimental, and will probably disappear or
change in the future.
"""
if self.startrank != 0:
raise _errors.SearchError("startrank must be zero to reorder by similiarity")
ds = _xapian.DocSimCosine()
ds.set_termfreqsource(_xapian.DatabaseTermFreqSource(self._conn._index))
if fields is not None:
ds.set_expand_decider(self._make_expand_decider(fields))
tophits = []
nottophits = []
full = False
reordered = False
sim_count = 0
new_order = []
end = min(self.endrank, maxcount)
for i in xrange(end):
if full:
new_order.append(i)
continue
hit = self._mset.get_hit(i)
if len(tophits) == 0:
tophits.append(hit)
continue
# Compare each incoming hit to tophits
maxsim = 0.0
for tophit in tophits[-1:]:
sim_count += 1
sim = ds.similarity(hit.document, tophit.document)
if sim > maxsim:
maxsim = sim
# If it's not similar to an existing hit, add to tophits.
if maxsim < max_similarity:
tophits.append(hit)
else:
nottophits.append(hit)
reordered = True
# If we're full of hits, append to the end.
if len(tophits) >= count:
for hit in tophits:
new_order.append(hit.rank)
for hit in nottophits:
new_order.append(hit.rank)
full = True
if not full:
for hit in tophits:
new_order.append(hit.rank)
for hit in nottophits:
new_order.append(hit.rank)
if end != self.endrank:
new_order.extend(range(end, self.endrank))
assert len(new_order) == self.endrank
if reordered:
self._mset_order = new_order
else:
assert new_order == range(self.endrank)
def __repr__(self):
return ("<SearchResults(startrank=%d, "
"endrank=%d, "
"more_matches=%s, "
"matches_lower_bound=%d, "
"matches_upper_bound=%d, "
"matches_estimated=%d, "
"estimate_is_exact=%s)>" %
(
self.startrank,
self.endrank,
self.more_matches,
self.matches_lower_bound,
self.matches_upper_bound,
self.matches_estimated,
self.estimate_is_exact,
))
def _get_more_matches(self):
# This check relies on us having asked for at least one more result
# than retrieved to be checked.
return (self.matches_lower_bound > self.endrank)
more_matches = property(_get_more_matches, doc=
"""Check whether there are further matches after those in this result set.
""")
def _get_startrank(self):
return self._mset.get_firstitem()
startrank = property(_get_startrank, doc=
"""Get the rank of the first item in the search results.
This corresponds to the "startrank" parameter passed to the search() method.
""")
def _get_endrank(self):
return self._mset.get_firstitem() + len(self._mset)
endrank = property(_get_endrank, doc=
"""Get the rank of the item after the end of the search results.
If there are sufficient results in the index, this corresponds to the
"endrank" parameter passed to the search() method.
""")
def _get_lower_bound(self):
return self._mset.get_matches_lower_bound()
matches_lower_bound = property(_get_lower_bound, doc=
"""Get a lower bound on the total number of matching documents.
""")
def _get_upper_bound(self):
return self._mset.get_matches_upper_bound()
matches_upper_bound = property(_get_upper_bound, doc=
"""Get an upper bound on the total number of matching documents.
""")
def _get_human_readable_estimate(self):
lower = self._mset.get_matches_lower_bound()
upper = self._mset.get_matches_upper_bound()
est = self._mset.get_matches_estimated()
return _get_significant_digits(est, lower, upper)
matches_human_readable_estimate = property(_get_human_readable_estimate,
doc=
"""Get a human readable estimate of the number of matching documents.
This consists of the value returned by the "matches_estimated" property,
rounded to an appropriate number of significant digits (as determined by
the values of the "matches_lower_bound" and "matches_upper_bound"
properties).
""")
def _get_estimated(self):
return self._mset.get_matches_estimated()
matches_estimated = property(_get_estimated, doc=
"""Get an estimate for the total number of matching documents.
""")
def _estimate_is_exact(self):
return self._mset.get_matches_lower_bound() == \
self._mset.get_matches_upper_bound()
estimate_is_exact = property(_estimate_is_exact, doc=
"""Check whether the estimated number of matching documents is exact.
If this returns true, the estimate given by the `matches_estimated`
property is guaranteed to be correct.
If this returns false, it is possible that the actual number of matching
documents is different from the number given by the `matches_estimated`
property.
""")
def get_hit(self, index):
"""Get the hit with a given index.
"""
if self._mset_order is None:
msetitem = self._mset.get_hit(index)
else:
msetitem = self._mset.get_hit(self._mset_order[index])
return SearchResult(msetitem, self)
__getitem__ = get_hit
def __iter__(self):
"""Get an iterator over the hits in the search result.
The iterator returns the results in increasing order of rank.
"""
return SearchResultIter(self, self._mset_order)
def __len__(self):
"""Get the number of hits in the search result.
Note that this is not (usually) the number of matching documents for
the search. If startrank is non-zero, it's not even the rank of the
last document in the search result. It's simply the number of hits
stored in the search result.
It is, however, the number of items returned by the iterator produced
by calling iter() on this SearchResults object.
"""
return len(self._mset)
def get_top_tags(self, field, maxtags):
"""Get the most frequent tags in a given field.
- `field` - the field to get tags for. This must have been specified
in the "gettags" argument of the search() call.
- `maxtags` - the maximum number of tags to return.
Returns a sequence of 2-item tuples, in which the first item in the
tuple is the tag, and the second is the frequency of the tag in the
matches seen (as an integer).
"""
if 'tags' in _checkxapian.missing_features:
raise errors.SearchError("Tags unsupported with this release of xapian")
if self._tagspy is None or field not in self._tagfields:
raise _errors.SearchError("Field %r was not specified for getting tags" % field)
prefix = self._conn._field_mappings.get_prefix(field)
return self._tagspy.get_top_terms(prefix, maxtags)
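# A minimal sketch (assumes a hypothetical "tags" field was passed in the
# "gettags" argument of the search() call):
#
#   for tag, freq in results.get_top_tags('tags', 10):
#       print tag, freq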
def get_suggested_facets(self, maxfacets=5, desired_num_of_categories=7,
required_facets=None):
"""Get a suggested set of facets, to present to the user.
This returns a list, in descending order of the usefulness of the
facet, in which each item is a tuple holding:
- fieldname of facet.
- sequence of 2-tuples holding the suggested values or ranges for that
field:
For facets of type 'string', the first item in the 2-tuple will
simply be the string supplied when the facet value was added to its
document. For facets of type 'float', it will be a 2-tuple, holding
floats giving the start and end of the suggested value range.
The second item in the 2-tuple will be the frequency of the facet
value or range in the result set.
If required_facets is not None, it must be a field name, or a sequence
of field names. Any field names mentioned in required_facets will be
returned if there are any facet values at all in the search results for
that field. The facet will only be omitted if there are no facet
values at all for the field.
The value of maxfacets will be respected as far as possible; the
exception is that if there are too many fields listed in
required_facets with at least one value in the search results, extra
facets will be returned (ie, obeying the required_facets parameter is
considered more important than the maxfacets parameter).
If facet_hierarchy was indicated when search() was called, and the
query included facets, then only subfacets of those query facets and
top-level facets will be included in the returned list. Furthermore
top-level facets will only be returned if there are remaining places
in the list after it has been filled with subfacets. Note that
required_facets is still respected regardless of the facet hierarchy.
If a query type was specified when search() was called, and the query
included facets, then facets with an association of Never to the
query type are never returned, even if mentioned in required_facets.
Facets with an association of Preferred are listed before others in
the returned list.
"""
if 'facets' in _checkxapian.missing_features:
raise errors.SearchError("Facets unsupported with this release of xapian")
if self._facetspy is None:
raise _errors.SearchError("Facet selection wasn't enabled when the search was run")
if isinstance(required_facets, basestring):
required_facets = [required_facets]
scores = []
facettypes = {}
for field, slot, kwargslist in self._facetfields:
type = None
for kwargs in kwargslist:
type = kwargs.get('type', None)
if type is not None: break
if type is None: type = 'string'
if type == 'float':
if field not in self._numeric_ranges_built:
self._facetspy.build_numeric_ranges(slot, desired_num_of_categories)
self._numeric_ranges_built[field] = None
facettypes[field] = type
score = self._facetspy.score_categorisation(slot, desired_num_of_categories)
scores.append((score, field, slot))
# Sort the facets: preferred facets for the query type sort before all
# others, then subfacets sort before top-level facets, and finally ties
# are broken by score.
if self._facethierarchy:
# Note, tuple[-2] is the value of 'field' in a scores tuple
scores = [(tuple[-2] not in self._facethierarchy,) + tuple for tuple in scores]
if self._facetassocs:
preferred = _indexerconnection.IndexerConnection.FacetQueryType_Preferred
scores = [(self._facetassocs.get(tuple[-2]) != preferred,) + tuple for tuple in scores]
scores.sort()
if self._facethierarchy:
index = 1
else:
index = 0
if self._facetassocs:
index += 1
if index > 0:
scores = [tuple[index:] for tuple in scores]
results = []
required_results = []
for score, field, slot in scores:
# Check if the facet is required
required = False
if required_facets is not None:
required = field in required_facets
# If we've got enough facets, and the field isn't required, skip it
if not required and len(results) + len(required_results) >= maxfacets:
continue
# Get the values
values = self._facetspy.get_values_as_dict(slot)
if field in self._numeric_ranges_built:
if '' in values:
del values['']
# Required facets must occur at least once, other facets must occur
# at least twice.
if required:
if len(values) < 1:
continue
else:
if len(values) <= 1:
continue
newvalues = []
if facettypes[field] == 'float':
# Convert numbers to python numbers, and number ranges to a
# python tuple of two numbers.
for value, frequency in values.iteritems():
if len(value) <= 9:
value1 = _log(_xapian.sortable_unserialise, value)
value2 = value1
else:
value1 = _log(_xapian.sortable_unserialise, value[:9])
value2 = _log(_xapian.sortable_unserialise, value[9:])
newvalues.append(((value1, value2), frequency))
else:
for value, frequency in values.iteritems():
newvalues.append((value, frequency))
newvalues.sort()
if required:
required_results.append((score, field, newvalues))
else:
results.append((score, field, newvalues))
# Throw away any excess results if we have more required_results to
# insert.
maxfacets = maxfacets - len(required_results)
if maxfacets <= 0:
results = required_results
else:
results = results[:maxfacets]
results.extend(required_results)
results.sort()
# Throw away the scores because they're not meaningful outside this
# algorithm.
results = [(field, newvalues) for (score, field, newvalues) in results]
return results
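# A sketch of consuming the return value (field names hypothetical): each
# item is a fieldname and a sequence of (value, frequency) 2-tuples.
#
#   for field, values in results.get_suggested_facets(maxfacets=5):
#       print field
#       for value, freq in values:
#           print '  %r: %d' % (value, freq)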
class SearchConnection(object):
"""A connection to the search engine for searching.
The connection will access a view of the database.
"""
_qp_flags_base = _xapian.QueryParser.FLAG_LOVEHATE
_qp_flags_phrase = _xapian.QueryParser.FLAG_PHRASE
_qp_flags_synonym = (_xapian.QueryParser.FLAG_AUTO_SYNONYMS |
_xapian.QueryParser.FLAG_AUTO_MULTIWORD_SYNONYMS)
_qp_flags_bool = _xapian.QueryParser.FLAG_BOOLEAN
_index = None
def __init__(self, indexpath):
"""Create a new connection to the index for searching.
Any number of search connections may be open for a particular
database at a given time (regardless of whether a connection for
indexing is open as well).
If the database doesn't exist, an exception will be raised.
"""
self._index = _log(_xapian.Database, indexpath)
self._indexpath = indexpath
# Read the actions.
self._load_config()
self._close_handlers = []
def __del__(self):
self.close()
def append_close_handler(self, handler, userdata=None):
"""Append a callback to the list of close handlers.
These will be called when the SearchConnection is closed. This happens
when the close() method is called, or when the SearchConnection object
is deleted. The callback will be passed two arguments: the path to the
SearchConnection object, and the userdata supplied to this method.
The handlers will be called in the order in which they were added.
The handlers will be called after the connection has been closed, so
cannot prevent it closing: their return value will be ignored. In
addition, they should not raise any exceptions.
"""
self._close_handlers.append((handler, userdata))
def _get_sort_type(self, field):
"""Get the sort type that should be used for a given field.
"""
try:
actions = self._field_actions[field]._actions
except KeyError:
actions = {}
for action, kwargslist in actions.iteritems():
if action == FieldActions.SORT_AND_COLLAPSE:
for kwargs in kwargslist:
return kwargs['type']
def _load_config(self):
"""Load the configuration for the database.
"""
# Note: this code is basically duplicated in the IndexerConnection
# class. Move it to a shared location.
assert self._index is not None
config_str = _log(self._index.get_metadata, '_xappy_config')
if len(config_str) == 0:
self._field_actions = {}
self._field_mappings = _fieldmappings.FieldMappings()
self._facet_hierarchy = {}
self._facet_query_table = {}
return
try:
(self._field_actions, mappings, self._facet_hierarchy, self._facet_query_table, self._next_docid) = _cPickle.loads(config_str)
except ValueError:
# Backwards compatibility - configuration used to lack _facet_hierarchy and _facet_query_table
(self._field_actions, mappings, self._next_docid) = _cPickle.loads(config_str)
self._facet_hierarchy = {}
self._facet_query_table = {}
self._field_mappings = _fieldmappings.FieldMappings(mappings)
def reopen(self):
"""Reopen the connection.
This updates the revision of the index which the connection references
to the latest flushed revision.
"""
if self._index is None:
raise _errors.SearchError("SearchConnection has been closed")
self._index.reopen()
# Re-read the actions.
self._load_config()
def close(self):
"""Close the connection to the database.
It is important to call this method before allowing the class to be
garbage collected to ensure that the connection is cleaned up promptly.
No other methods may be called on the connection after this has been
called. (It is permissible to call close() multiple times, but
only the first call will have any effect.)
If an exception occurs, the database will be closed, but changes since
the last call to flush may be lost.
"""
if self._index is None:
return
# Remember the index path
indexpath = self._indexpath
# There is currently no "close()" method for xapian databases, so
# we have to rely on the garbage collector. Since we never copy
# the _index property out of this class, there should be no cycles,
# so the standard python implementation should garbage collect
# _index straight away. A close() method is planned to be added to
# xapian at some point - when it is, we should call it here to make
# the code more robust.
self._index = None
self._indexpath = None
self._field_actions = None
self._field_mappings = None
# Call the close handlers.
for handler, userdata in self._close_handlers:
try:
handler(indexpath, userdata)
except Exception, e:
import sys, traceback
print >>sys.stderr, "WARNING: unhandled exception in handler called by SearchConnection.close(): %s" % traceback.format_exception_only(type(e), e)
def get_doccount(self):
"""Count the number of documents in the database.
This count will include documents which have been added or removed but
not yet flushed().
"""
if self._index is None:
raise _errors.SearchError("SearchConnection has been closed")
return self._index.get_doccount()
OP_AND = _xapian.Query.OP_AND
OP_OR = _xapian.Query.OP_OR
def query_composite(self, operator, queries):
"""Build a composite query from a list of queries.
The queries are combined with the supplied operator, which is either
SearchConnection.OP_AND or SearchConnection.OP_OR.
"""
if self._index is None:
raise _errors.SearchError("SearchConnection has been closed")
return _log(_xapian.Query, operator, list(queries))
def query_multweight(self, query, multiplier):
"""Build a query which modifies the weights of a subquery.
This produces a query which returns the same documents as the subquery,
and in the same order, but with the weights assigned to each document
multiplied by the value of "multiplier". "multiplier" may be any floating
point value, but negative values will be clipped to 0, since Xapian
doesn't support negative weights.
This can be useful when producing queries to be combined with
query_composite, because it allows the relative importance of parts of
the query to be adjusted.
"""
return _log(_xapian.Query, _xapian.Query.OP_SCALE_WEIGHT, query, multiplier)
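# A hedged example (field names assumed, not from the original source):
# double the weight of title matches before combining with body matches
# via query_composite.
#
#   q_title = conn.query_field('title', 'budget')
#   q_body = conn.query_field('text', 'budget')
#   q = conn.query_composite(conn.OP_OR,
#                            [conn.query_multweight(q_title, 2.0), q_body])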
def query_filter(self, query, filter, exclude=False):
"""Filter a query with another query.
If exclude is False (or not specified), documents will only match the
resulting query if they match both the first and the second query: the
results of the first query are "filtered" to only include those which
also match the second query.
If exclude is True, documents will only match the resulting query if
they match the first query, but not the second query: the results of
the first query are "filtered" to only include those which do not match
the second query.
Documents will always be weighted according to only the first query.
- `query`: The query to filter.
- `filter`: The filter to apply to the query.
- `exclude`: If True, the sense of the filter is reversed - only
documents which do not match the second query will be returned.
"""
if self._index is None:
raise _errors.SearchError("SearchConnection has been closed")
if not isinstance(filter, _xapian.Query):
raise _errors.SearchError("Filter must be a Xapian Query object")
if exclude:
return _log(_xapian.Query, _xapian.Query.OP_AND_NOT, query, filter)
else:
return _log(_xapian.Query, _xapian.Query.OP_FILTER, query, filter)
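# A usage sketch (field names assumed): restrict a parsed query to
# documents whose "status" field is "published", or exclude them instead.
#
#   q = conn.query_parse('kittens')
#   flt = conn.query_field('status', 'published')
#   q_only = conn.query_filter(q, flt)
#   q_without = conn.query_filter(q, flt, exclude=True)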
def query_adjust(self, primary, secondary):
"""Adjust the weights of one query with a secondary query.
Documents will be returned from the resulting query if and only if they
match the primary query (specified by the "primary" parameter).
However, the weights (and hence, the relevance rankings) of the
documents will be adjusted by adding weights from the secondary query
(specified by the "secondary" parameter).
"""
if self._index is None:
raise _errors.SearchError("SearchConnection has been closed")
return _log(_xapian.Query, _xapian.Query.OP_AND_MAYBE, primary, secondary)
def query_range(self, field, begin, end):
"""Create a query for a range search.
This creates a query which matches only those documents which have a
field value in the specified range.
Begin and end must be appropriate values for the field, according to
the 'type' parameter supplied to the SORTABLE action for the field.
The begin and end values are both inclusive - any documents with a
value equal to begin or end will be returned (unless end is less than
begin, in which case no documents will be returned).
Begin or end may be set to None in order to create an open-ended
range. (They may also both be set to None, which will generate a query
which matches all documents containing any value for the field.)
"""
if self._index is None:
raise _errors.SearchError("SearchConnection has been closed")
if begin is None and end is None:
# Return a "match everything" query
return _log(_xapian.Query, '')
try:
slot = self._field_mappings.get_slot(field, 'collsort')
except KeyError:
# Return a "match nothing" query
return _log(_xapian.Query)
sorttype = self._get_sort_type(field)
marshaller = SortableMarshaller(False)
fn = marshaller.get_marshall_function(field, sorttype)
if begin is not None:
begin = fn(field, begin)
if end is not None:
end = fn(field, end)
if begin is None:
return _log(_xapian.Query, _xapian.Query.OP_VALUE_LE, slot, end)
if end is None:
return _log(_xapian.Query, _xapian.Query.OP_VALUE_GE, slot, begin)
return _log(_xapian.Query, _xapian.Query.OP_VALUE_RANGE, slot, begin, end)
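# A sketch of range queries, assuming a hypothetical "price" field indexed
# with a SORTABLE action of type 'float':
#
#   q = conn.query_range('price', 10.0, 20.0)  # 10.0 <= price <= 20.0
#   q = conn.query_range('price', None, 20.0)  # open-ended lower bound
#   q = conn.query_range('price', None, None)  # matches all documents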
def query_facet(self, field, val):
"""Create a query for a facet value.
This creates a query which matches only those documents which have a
facet value in the specified range.
For a numeric range facet, val should be a tuple holding the start and
end of the range, or a comma separated string holding two floating
point values. For other facets, val should be the value to look
for.
The start and end values are both inclusive - any documents with a
value equal to start or end will be returned (unless end is less than
start, in which case no documents will be returned).
"""
if self._index is None:
raise _errors.SearchError("SearchConnection has been closed")
if 'facets' in _checkxapian.missing_features:
raise errors.SearchError("Facets unsupported with this release of xapian")
try:
actions = self._field_actions[field]._actions
except KeyError:
actions = {}
facettype = None
for action, kwargslist in actions.iteritems():
if action == FieldActions.FACET:
for kwargs in kwargslist:
facettype = kwargs.get('type', None)
if facettype is not None:
break
if facettype is not None:
break
if facettype == 'float':
if isinstance(val, basestring):
val = [float(v) for v in val.split(',', 2)]
assert(len(val) == 2)
try:
slot = self._field_mappings.get_slot(field, 'facet')
except KeyError:
return _log(_xapian.Query)
# FIXME - check that sorttype == self._get_sort_type(field)
sorttype = 'float'
marshaller = SortableMarshaller(False)
fn = marshaller.get_marshall_function(field, sorttype)
begin = fn(field, val[0])
end = fn(field, val[1])
return _log(_xapian.Query, _xapian.Query.OP_VALUE_RANGE, slot, begin, end)
else:
assert(facettype == 'string' or facettype is None)
prefix = self._field_mappings.get_prefix(field)
return _log(_xapian.Query, prefix + val.lower())
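# A sketch (facet names assumed): a string facet takes a plain value; a
# 'float' facet takes a (start, end) tuple or a "start,end" string.
#
#   q = conn.query_facet('category', 'books')
#   q = conn.query_facet('price', (10.0, 20.0))
#   q = conn.query_facet('price', '10.0,20.0')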
def _prepare_queryparser(self, allow, deny, default_op, default_allow,
default_deny):
"""Prepare (and return) a query parser using the specified fields and
operator.
"""
if self._index is None:
raise _errors.SearchError("SearchConnection has been closed")
if isinstance(allow, basestring):
allow = (allow, )
if isinstance(deny, basestring):
deny = (deny, )
if allow is not None and len(allow) == 0:
allow = None
if deny is not None and len(deny) == 0:
deny = None
if allow is not None and deny is not None:
raise _errors.SearchError("Cannot specify both `allow` and `deny` "
"(got %r and %r)" % (allow, deny))
if isinstance(default_allow, basestring):
default_allow = (default_allow, )
if isinstance(default_deny, basestring):
default_deny = (default_deny, )
if default_allow is not None and len(default_allow) == 0:
default_allow = None
if default_deny is not None and len(default_deny) == 0:
default_deny = None
if default_allow is not None and default_deny is not None:
raise _errors.SearchError("Cannot specify both `default_allow` and `default_deny` "
"(got %r and %r)" % (default_allow, default_deny))
qp = _log(_xapian.QueryParser)
qp.set_database(self._index)
qp.set_default_op(default_op)
if allow is None:
allow = [key for key in self._field_actions]
if deny is not None:
allow = [key for key in allow if key not in deny]
for field in allow:
try:
actions = self._field_actions[field]._actions
except KeyError:
actions = {}
for action, kwargslist in actions.iteritems():
if action == FieldActions.INDEX_EXACT:
# FIXME - need patched version of xapian to add exact prefixes
#qp.add_exact_prefix(field, self._field_mappings.get_prefix(field))
qp.add_prefix(field, self._field_mappings.get_prefix(field))
if action == FieldActions.INDEX_FREETEXT:
allow_field_specific = True
for kwargs in kwargslist:
allow_field_specific = allow_field_specific or kwargs.get('allow_field_specific', True)
if not allow_field_specific:
continue
qp.add_prefix(field, self._field_mappings.get_prefix(field))
for kwargs in kwargslist:
try:
lang = kwargs['language']
my_stemmer = _log(_xapian.Stem, lang)
qp.my_stemmer = my_stemmer
qp.set_stemmer(my_stemmer)
qp.set_stemming_strategy(qp.STEM_SOME)
except KeyError:
pass
if default_allow is not None or default_deny is not None:
if default_allow is None:
default_allow = [key for key in self._field_actions]
if default_deny is not None:
default_allow = [key for key in default_allow if key not in default_deny]
for field in default_allow:
try:
actions = self._field_actions[field]._actions
except KeyError:
actions = {}
for action, kwargslist in actions.iteritems():
if action == FieldActions.INDEX_FREETEXT:
qp.add_prefix('', self._field_mappings.get_prefix(field))
# FIXME - set stemming options for the default prefix
return qp
def _query_parse_with_prefix(self, qp, string, flags, prefix):
"""Parse a query, with an optional prefix.
"""
if prefix is None:
return qp.parse_query(string, flags)
else:
return qp.parse_query(string, flags, prefix)
def _query_parse_with_fallback(self, qp, string, prefix=None):
"""Parse a query with various flags.
If the initial boolean pass fails, fall back to not using boolean
operators.
"""
try:
q1 = self._query_parse_with_prefix(qp, string,
self._qp_flags_base |
self._qp_flags_phrase |
self._qp_flags_synonym |
self._qp_flags_bool,
prefix)
except _xapian.QueryParserError, e:
# If we got a parse error, retry without boolean operators (since
# these are the usual cause of the parse error).
q1 = self._query_parse_with_prefix(qp, string,
self._qp_flags_base |
self._qp_flags_phrase |
self._qp_flags_synonym,
prefix)
qp.set_stemming_strategy(qp.STEM_NONE)
try:
q2 = self._query_parse_with_prefix(qp, string,
self._qp_flags_base |
self._qp_flags_bool,
prefix)
except _xapian.QueryParserError, e:
# If we got a parse error, retry without boolean operators (since
# these are the usual cause of the parse error).
q2 = self._query_parse_with_prefix(qp, string,
self._qp_flags_base,
prefix)
return _log(_xapian.Query, _xapian.Query.OP_AND_MAYBE, q1, q2)
def query_parse(self, string, allow=None, deny=None, default_op=OP_AND,
default_allow=None, default_deny=None):
"""Parse a query string.
This is intended for parsing queries entered by a user. If you wish to
combine structured queries, it is generally better to use the other
query building methods, such as `query_composite` (though you may wish
to create parts of the query to combine with such methods with this
method).
The string passed to this method can have various operators in it. In
particular, it may contain field specifiers (ie, field names, followed
by a colon, followed by some text to search for in that field). For
example, if "author" is a field in the database, the search string
could contain "author:richard", and this would be interpreted as
"search for richard in the author field". By default, any fields in
the database which are indexed with INDEX_EXACT or INDEX_FREETEXT will
be available for field specific searching in this way - however, this
can be modified using the "allow" or "deny" parameters, and also by the
allow_field_specific tag on INDEX_FREETEXT fields.
Any text which isn't prefixed by a field specifier is used to search
the "default set" of fields. By default, this is the full set of
fields in the database which are indexed with INDEX_FREETEXT and for
which the search_by_default flag is set (ie, if the text is found in any
of those fields, the query will match). However, this may be modified
with the "default_allow" and "default_deny" parameters. (Note that
fields which are indexed with INDEX_EXACT aren't allowed to be used in
the default list of fields.)
- `string`: The string to parse.
- `allow`: A list of fields to allow in the query.
- `deny`: A list of fields not to allow in the query.
- `default_op`: The default operator to combine query terms with.
- `default_allow`: A list of fields to search for by default.
- `default_deny`: A list of fields not to search for by default.
Only one of `allow` and `deny` may be specified.
Only one of `default_allow` and `default_deny` may be specified.
If any of the entries in `allow` are not present in the configuration
for the database, or are not specified for indexing (either as
INDEX_EXACT or INDEX_FREETEXT), they will be ignored. If any of the
entries in `deny` are not present in the configuration for the
database, they will be ignored.
Returns a Query object, which may be passed to the search() method, or
combined with other queries.
"""
qp = self._prepare_queryparser(allow, deny, default_op, default_allow,
default_deny)
return self._query_parse_with_fallback(qp, string)
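# A minimal end-to-end sketch (index path and field names assumed):
#
#   conn = SearchConnection('myindex')
#   q = conn.query_parse('author:richard budget', default_allow=['text'])
#   results = conn.search(q, 0, 10)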
def query_field(self, field, value, default_op=OP_AND):
"""A query for a single field.
"""
if self._index is None:
raise _errors.SearchError("SearchConnection has been closed")
try:
actions = self._field_actions[field]._actions
except KeyError:
actions = {}
# need to check on field type, and stem / split as appropriate
for action, kwargslist in actions.iteritems():
if action in (FieldActions.INDEX_EXACT,
FieldActions.TAG,
FieldActions.FACET,):
prefix = self._field_mappings.get_prefix(field)
if len(value) > 0:
chval = ord(value[0])
if chval >= ord('A') and chval <= ord('Z'):
prefix = prefix + ':'
return _log(_xapian.Query, prefix + value)
if action == FieldActions.INDEX_FREETEXT:
qp = _log(_xapian.QueryParser)
qp.set_default_op(default_op)
prefix = self._field_mappings.get_prefix(field)
for kwargs in kwargslist:
try:
lang = kwargs['language']
qp.set_stemmer(_log(_xapian.Stem, lang))
qp.set_stemming_strategy(qp.STEM_SOME)
except KeyError:
pass
return self._query_parse_with_fallback(qp, value, prefix)
return _log(_xapian.Query)
def query_similar(self, ids, allow=None, deny=None, simterms=10):
"""Get a query which returns documents which are similar to others.
The list of document IDs to base the similarity search on is given in
`ids`. This should be an iterable, holding a list of strings. If
any of the supplied IDs cannot be found in the database, they will be
ignored. (If no IDs can be found in the database, the resulting query
will not match any documents.)
By default, all fields which have been indexed for freetext searching
will be used for the similarity calculation. The list of fields used
for this can be customised using the `allow` and `deny` parameters
(only one of which may be specified):
- `allow`: A list of fields to base the similarity calculation on.
- `deny`: A list of fields not to base the similarity calculation on.
- `simterms`: Number of terms to use for the similarity calculation.
For convenience, any of `ids`, `allow`, or `deny` may be strings, which
will be treated the same as a list of length 1.
Regardless of the setting of `allow` and `deny`, only fields which have
been indexed for freetext searching will be used for the similarity
measure - all other fields will always be ignored for this purpose.
"""
eterms, prefixes = self._get_eterms(ids, allow, deny, simterms)
# Use the "elite set" operator, which chooses the terms with the
# highest query weight to use.
q = _log(_xapian.Query, _xapian.Query.OP_ELITE_SET, eterms, simterms)
return q
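# A hedged example (document IDs assumed): find documents similar to two
# known documents, based only on their "text" field.
#
#   q = conn.query_similar(['doc1', 'doc2'], allow='text')
#   results = conn.search(q, 0, 10)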
def significant_terms(self, ids, maxterms=10, allow=None, deny=None):
"""Get a set of "significant" terms for a document, or documents.
This has a similar interface to query_similar(): it takes a list of
ids, and an optional specification of a set of fields to consider.
Instead of returning a query, it returns a list of terms from the
document (or documents), which appear "significant". Roughly,
in this situation significant means that the terms occur more
frequently in the specified document than in the rest of the corpus.
The list is in decreasing order of "significance".
By default, all terms related to fields which have been indexed for
freetext searching will be considered for the list of significant
terms. The list of fields used for this can be customised using the
`allow` and `deny` parameters (only one of which may be specified):
- `allow`: A list of fields to consider.
- `deny`: A list of fields not to consider.
For convenience, any of `ids`, `allow`, or `deny` may be strings, which
will be treated the same as a list of length 1.
Regardless of the setting of `allow` and `deny`, only fields which have
been indexed for freetext searching will be considered - all other
fields will always be ignored for this purpose.
The maximum number of terms to return may be specified by the maxterms
parameter.
"""
eterms, prefixes = self._get_eterms(ids, allow, deny, maxterms)
terms = []
for term in eterms:
pos = 0
for char in term:
if not char.isupper():
break
pos += 1
field = prefixes[term[:pos]]
value = term[pos:]
terms.append((field, value))
return terms
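# A sketch (document ID assumed): list the ten most significant terms of a
# document as (fieldname, value) pairs, in decreasing significance.
#
#   for field, value in conn.significant_terms('doc1', maxterms=10):
#       print field, value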
def _get_eterms(self, ids, allow, deny, simterms):
"""Get a set of terms for an expand
"""
if self._index is None:
raise _errors.SearchError("SearchConnection has been closed")
if allow is not None and deny is not None:
raise _errors.SearchError("Cannot specify both `allow` and `deny`")
if isinstance(ids, basestring):
ids = (ids, )
if isinstance(allow, basestring):
allow = (allow, )
if isinstance(deny, basestring):
deny = (deny, )
# Set "allow" to contain a list of all the fields to use.
if allow is None:
allow = [key for key in self._field_actions]
if deny is not None:
allow = [key for key in allow if key not in deny]
# Set "prefixes" to contain a list of all the prefixes to use.
prefixes = {}
for field in allow:
try:
actions = self._field_actions[field]._actions
except KeyError:
actions = {}
for action, kwargslist in actions.iteritems():
if action == FieldActions.INDEX_FREETEXT:
prefixes[self._field_mappings.get_prefix(field)] = field
# Repeat the expand until we don't get a DatabaseModifiedError
while True:
try:
eterms = self._perform_expand(ids, prefixes, simterms)
break
except _xapian.DatabaseModifiedError, e:
self.reopen()
return eterms, prefixes
class ExpandDecider(_xapian.ExpandDecider):
def __init__(self, prefixes):
_xapian.ExpandDecider.__init__(self)
self._prefixes = prefixes
def __call__(self, term):
pos = 0
for char in term:
if not char.isupper():
break
pos += 1
if term[:pos] in self._prefixes:
return True
return False
def _perform_expand(self, ids, prefixes, simterms):
"""Perform an expand operation to get the terms for a similarity
search, given a set of ids (and a set of prefixes to restrict the
similarity operation to).
"""
# Set idquery to be a query which returns the documents listed in
# "ids".
idquery = _log(_xapian.Query, _xapian.Query.OP_OR, ['Q' + id for id in ids])
enq = _log(_xapian.Enquire, self._index)
enq.set_query(idquery)
rset = _log(_xapian.RSet)
for id in ids:
pl = self._index.postlist('Q' + id)
try:
xapid = pl.next()
rset.add_document(xapid.docid)
except StopIteration:
pass
expanddecider = _log(self.ExpandDecider, prefixes)
eset = enq.get_eset(simterms, rset, 0, 1.0, expanddecider)
return [term.term for term in eset]
def query_all(self):
"""A query which matches all the documents in the database.
"""
return _log(_xapian.Query, '')
def query_none(self):
"""A query which matches no documents in the database.
This may be useful as a placeholder in various situations.
"""
return _log(_xapian.Query)
def spell_correct(self, querystr, allow=None, deny=None, default_op=OP_AND,
default_allow=None, default_deny=None):
"""Correct a query spelling.
This returns a version of the query string with any misspelt words
corrected.
- `allow`: A list of fields to allow in the query.
- `deny`: A list of fields not to allow in the query.
- `default_op`: The default operator to combine query terms with.
- `default_allow`: A list of fields to search for by default.
- `default_deny`: A list of fields not to search for by default.
Only one of `allow` and `deny` may be specified.
Only one of `default_allow` and `default_deny` may be specified.
If any of the entries in `allow` are not present in the configuration
for the database, or are not specified for indexing (either as
INDEX_EXACT or INDEX_FREETEXT), they will be ignored. If any of the
entries in `deny` are not present in the configuration for the
database, they will be ignored.
Note that it is possible that the resulting spell-corrected query will
still match no documents - the user should usually check that some
documents are matched by the corrected query before suggesting it to
users.
"""
qp = self._prepare_queryparser(allow, deny, default_op, default_allow,
default_deny)
try:
qp.parse_query(querystr,
self._qp_flags_base |
self._qp_flags_phrase |
self._qp_flags_synonym |
self._qp_flags_bool |
qp.FLAG_SPELLING_CORRECTION)
except _xapian.QueryParserError:
qp.parse_query(querystr,
self._qp_flags_base |
self._qp_flags_phrase |
self._qp_flags_synonym |
qp.FLAG_SPELLING_CORRECTION)
corrected = qp.get_corrected_query_string()
if len(corrected) == 0:
if isinstance(querystr, unicode):
# Encode as UTF-8 for consistency - this happens automatically
# to values passed to Xapian.
return querystr.encode('utf-8')
return querystr
return corrected
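# A usage sketch (query string illustrative): suggest a correction only if
# it differs from the input and actually matches some documents.
#
#   corrected = conn.spell_correct('kitens')
#   if corrected != 'kitens':
#       if len(conn.search(conn.query_parse(corrected), 0, 1)) > 0:
#           print 'Did you mean %r?' % corrected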
def can_collapse_on(self, field):
"""Check if this database supports collapsing on a specified field.
"""
if self._index is None:
raise _errors.SearchError("SearchConnection has been closed")
try:
self._field_mappings.get_slot(field, 'collsort')
except KeyError:
return False
return True
def can_sort_on(self, field):
"""Check if this database supports sorting on a specified field.
"""
if self._index is None:
raise _errors.SearchError("SearchConnection has been closed")
try:
self._field_mappings.get_slot(field, 'collsort')
except KeyError:
return False
return True
def _get_prefix_from_term(self, term):
"""Get the prefix of a term.
Prefixes are any initial capital letters, with the exception that R always
ends a prefix, even if followed by capital letters.
"""
for p in xrange(len(term)):
if term[p].islower():
return term[:p]
elif term[p] == 'R':
return term[:p+1]
return term
def _facet_query_never(self, facet, query_type):
"""Check if a facet must never be returned by a particular query type.
Returns True if the facet must never be returned.
Returns False if the facet may be returned - either because there is no
entry for the query type, or because the entry is not
FacetQueryType_Never.
"""
if query_type is None:
return False
if query_type not in self._facet_query_table:
return False
if facet not in self._facet_query_table[query_type]:
return False
return self._facet_query_table[query_type][facet] == _indexerconnection.IndexerConnection.FacetQueryType_Never
def search(self, query, startrank, endrank,
checkatleast=0, sortby=None, collapse=None,
gettags=None,
getfacets=None, allowfacets=None, denyfacets=None, usesubfacets=None,
percentcutoff=None, weightcutoff=None,
query_type=None):
"""Perform a search, for documents matching a query.
- `query` is the query to perform.
- `startrank` is the rank of the start of the range of matching
documents to return (ie, the result with this rank will be returned).
Ranks start at 0, which represents the "best" matching document.
- `endrank` is the rank at the end of the range of matching documents
to return. This is exclusive, so the result with this rank will not
be returned.
- `checkatleast` is the minimum number of results to check for: the
estimate of the total number of matches will always be exact if
the number of matches is less than `checkatleast`. A value of ``-1``
can be specified for the checkatleast parameter - this has the
special meaning of "check all matches", and is equivalent to passing
the result of get_doccount().
- `sortby` is the name of a field to sort by. It may be preceded by a
'+' or a '-' to indicate ascending or descending order
(respectively). If the first character is neither '+' nor '-', the
sort will be in ascending order.
- `collapse` is the name of a field to collapse the result documents
on. If this is specified, there will be at most one result in the
result set for each value of the field.
- `gettags` is the name of a field to count tag occurrences in, or a
list of fields to do so.
- `getfacets` is a boolean - if True, the matching documents will be
examined to build up a list of the facet values contained in them.
- `allowfacets` is a list of the fieldnames of facets to consider.
- `denyfacets` is a list of fieldnames of facets which will not be
considered.
- `usesubfacets` is a boolean - if True, only top-level facets and
subfacets of facets appearing in the query are considered (taking
precedence over `allowfacets` and `denyfacets`).
- `percentcutoff` is the minimum percentage a result must have to be
returned.
- `weightcutoff` is the minimum weight a result must have to be
returned.
- `query_type` is a value indicating the type of query being
performed. If not None, the value is used to influence which facets
are returned by the get_suggested_facets() method. If the
value of `getfacets` is False, it has no effect.
If neither 'allowfacets' nor 'denyfacets' is specified, all fields
holding facets will be considered (but see 'usesubfacets').
"""
if self._index is None:
raise _errors.SearchError("SearchConnection has been closed")
if 'facets' in _checkxapian.missing_features:
if getfacets is not None or \
allowfacets is not None or \
denyfacets is not None or \
usesubfacets is not None or \
query_type is not None:
raise errors.SearchError("Facets unsupported with this release of xapian")
if 'tags' in _checkxapian.missing_features:
if gettags is not None:
raise errors.SearchError("Tags unsupported with this release of xapian")
if checkatleast == -1:
checkatleast = self._index.get_doccount()
enq = _log(_xapian.Enquire, self._index)
enq.set_query(query)
if sortby is not None:
asc = True
if sortby[0] == '-':
asc = False
sortby = sortby[1:]
elif sortby[0] == '+':
sortby = sortby[1:]
try:
slotnum = self._field_mappings.get_slot(sortby, 'collsort')
except KeyError:
raise _errors.SearchError("Field %r was not indexed for sorting" % sortby)
# Note: we invert the "asc" parameter, because xapian treats
# "ascending" as meaning "higher values are better"; in other
# words, it considers "ascending" to mean return results in
# descending order.
enq.set_sort_by_value_then_relevance(slotnum, not asc)
if collapse is not None:
try:
slotnum = self._field_mappings.get_slot(collapse, 'collsort')
except KeyError:
raise _errors.SearchError("Field %r was not indexed for collapsing" % collapse)
enq.set_collapse_key(slotnum)
maxitems = max(endrank - startrank, 0)
# Always check for at least one more result, so we can report whether
# there are more matches.
checkatleast = max(checkatleast, endrank + 1)
# Build the matchspy.
matchspies = []
# First, add a matchspy for any gettags fields
if isinstance(gettags, basestring):
if len(gettags) != 0:
gettags = [gettags]
tagspy = None
if gettags is not None and len(gettags) != 0:
tagspy = _log(_xapian.TermCountMatchSpy)
for field in gettags:
try:
prefix = self._field_mappings.get_prefix(field)
tagspy.add_prefix(prefix)
except KeyError:
raise _errors.SearchError("Field %r was not indexed for tagging" % field)
matchspies.append(tagspy)
# Next, add a matchspy for facet selection, if requested.
facetspy = None
facetfields = []
if getfacets:
if allowfacets is not None and denyfacets is not None:
raise _errors.SearchError("Cannot specify both `allowfacets` and `denyfacets`")
if allowfacets is None:
allowfacets = [key for key in self._field_actions]
if denyfacets is not None:
allowfacets = [key for key in allowfacets if key not in denyfacets]
# include None in queryfacets so a top-level facet will
# satisfy self._facet_hierarchy.get(field) in queryfacets
# (i.e. always include top-level facets)
queryfacets = set([None])
if usesubfacets:
# add facets used in the query to queryfacets
termsiter = query.get_terms_begin()
termsend = query.get_terms_end()
while termsiter != termsend:
prefix = self._get_prefix_from_term(termsiter.get_term())
field = self._field_mappings.get_fieldname_from_prefix(prefix)
if field and FieldActions.FACET in self._field_actions[field]._actions:
queryfacets.add(field)
termsiter.next()
for field in allowfacets:
try:
actions = self._field_actions[field]._actions
except KeyError:
actions = {}
for action, kwargslist in actions.iteritems():
if action == FieldActions.FACET:
# filter out non-top-level facets that aren't subfacets
# of a facet in the query
if usesubfacets and self._facet_hierarchy.get(field) not in queryfacets:
continue
# filter out facets that should never be returned for the query type
if self._facet_query_never(field, query_type):
continue
slot = self._field_mappings.get_slot(field, 'facet')
if facetspy is None:
facetspy = _log(_xapian.CategorySelectMatchSpy)
facettype = None
for kwargs in kwargslist:
facettype = kwargs.get('type', None)
if facettype is not None:
break
if facettype is None or facettype == 'string':
facetspy.add_slot(slot, True)
else:
facetspy.add_slot(slot)
facetfields.append((field, slot, kwargslist))
if facetspy is None:
# Set facetspy to False, to distinguish from no facet
# calculation being performed. (This will prevent an
# error being thrown when the list of suggested facets is
# requested - instead, an empty list will be returned.)
facetspy = False
else:
matchspies.append(facetspy)
# Finally, build a single matchspy to pass to get_mset().
if len(matchspies) == 0:
matchspy = None
elif len(matchspies) == 1:
matchspy = matchspies[0]
else:
matchspy = _log(_xapian.MultipleMatchDecider)
for spy in matchspies:
matchspy.append(spy)
enq.set_docid_order(enq.DONT_CARE)
# Set percentage and weight cutoffs
if percentcutoff is not None or weightcutoff is not None:
if percentcutoff is None:
percentcutoff = 0
if weightcutoff is None:
weightcutoff = 0
enq.set_cutoff(percentcutoff, weightcutoff)
# Repeat the search until we don't get a DatabaseModifiedError
while True:
try:
if matchspy is None:
mset = enq.get_mset(startrank, maxitems, checkatleast)
else:
mset = enq.get_mset(startrank, maxitems, checkatleast,
None, None, matchspy)
break
except _xapian.DatabaseModifiedError, e:
self.reopen()
facet_hierarchy = None
if usesubfacets:
facet_hierarchy = self._facet_hierarchy
return SearchResults(self, enq, query, mset, self._field_mappings,
tagspy, gettags, facetspy, facetfields,
facet_hierarchy,
self._facet_query_table.get(query_type))
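# A hedged end-to-end sketch (index path, "date" sort field and facet
# configuration are all assumed, not from the original source):
#
#   conn = SearchConnection('myindex')
#   results = conn.search(conn.query_parse('budget'), 0, 10,
#                         sortby='-date', getfacets=True)
#   for hit in results:
#       print hit.rank, hit.id, hit.data
#   print results.get_suggested_facets(maxfacets=3)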
def iterids(self):
"""Get an iterator which returns all the ids in the database.
The unique IDs are currently returned in binary lexicographical sort
order, but this should not be relied on.
Note that the iterator returned by this method may raise a
xapian.DatabaseModifiedError exception if modifications are committed
to the database while the iteration is in progress. If this happens,
the search connection must be reopened (by calling reopen) and the
iteration restarted.
"""
if self._index is None:
raise _errors.SearchError("SearchConnection has been closed")
return _indexerconnection.PrefixedTermIter('Q', self._index.allterms())
def get_document(self, id):
"""Get the document with the specified unique ID.
Raises a KeyError if there is no such document. Otherwise, it returns
a ProcessedDocument.
"""
if self._index is None:
raise _errors.SearchError("SearchConnection has been closed")
while True:
try:
postlist = self._index.postlist('Q' + id)
try:
plitem = postlist.next()
except StopIteration:
# Unique ID not found
raise KeyError('Unique ID %r not found' % id)
try:
postlist.next()
raise _errors.IndexerError("Multiple documents " #pragma: no cover
"found with same unique ID")
except StopIteration:
# Only one instance of the unique ID found, as it should be.
pass
result = ProcessedDocument(self._field_mappings)
result.id = id
result._doc = self._index.get_document(plitem.docid)
return result
except _xapian.DatabaseModifiedError, e:
self.reopen()
def iter_synonyms(self, prefix=""):
"""Get an iterator over the synonyms.
- `prefix`: if specified, only synonym keys with this prefix will be
returned.
The iterator returns 2-tuples, in which the first item is the key (ie,
a 2-tuple holding the term or terms which will be synonym expanded,
followed by the fieldname specified (or None if no fieldname)), and the
second item is a tuple of strings holding the synonyms for the first
item.
These return values are suitable for the dict() builtin, so you can
write things like:
>>> conn = _indexerconnection.IndexerConnection('foo')
>>> conn.add_synonym('foo', 'bar')
>>> conn.add_synonym('foo bar', 'baz')
>>> conn.add_synonym('foo bar', 'foo baz')
>>> conn.flush()
>>> conn = SearchConnection('foo')
>>> dict(conn.iter_synonyms())
{('foo', None): ('bar',), ('foo bar', None): ('baz', 'foo baz')}
"""
if self._index is None:
raise _errors.SearchError("SearchConnection has been closed")
return _indexerconnection.SynonymIter(self._index, self._field_mappings, prefix)
def get_metadata(self, key):
"""Get an item of metadata stored in the connection.
This returns a value stored by a previous call to
IndexerConnection.set_metadata.
If the value is not found, this will return the empty string.
"""
if self._index is None:
raise _errors.IndexerError("SearchConnection has been closed")
if not hasattr(self._index, 'get_metadata'):
raise _errors.IndexerError("Version of xapian in use does not support metadata")
return _log(self._index.get_metadata, key)
if __name__ == '__main__':
import doctest, sys
doctest.testmod(sys.modules[__name__])
|
gdkar/pyglet
|
refs/heads/master
|
contrib/scene2d/examples/los.py
|
29
|
# Lots Of Sprites
'''
Results (us per sprite per frame):
sprites AMD64/mesa AMD64/nv6.6k MacBook Pro AMD/nv7.8k
2000 28.3 29.3 20.6 22.0
after __slots__ removal
sprites AMD64/mesa AMD64/nv6.6k MacBook Pro AMD/nv7.8k
2000
'''
import os
import sys
import random
from pyglet import options
options['debug_gl'] = False
from pyglet.window import Window
from pyglet import clock
from scene2d import *
from pyglet.gl import *
w = Window(600, 600, vsync=False)
img = Image2d.load('examples/noisy/ball.png')
class BouncySprite(Sprite):
dx = dy = 0
def update(self):
# move, check bounds
self.x += self.dx; self.y += self.dy
if self.x < 0: self.x = 0; self.dx = -self.dx
elif self.right > 600: self.right = 600; self.dx = -self.dx
if self.y < 0: self.y = 0; self.dy = -self.dy
elif self.top > 600: self.top = 600; self.dy = -self.dy
sprites = []
numsprites = int(sys.argv[1])
for i in range(numsprites):
x = random.randint(0, w.width-img.width)
y = random.randint(0, w.height-img.height)
s = BouncySprite(x, y, img.width, img.height, img)
s.dx = random.randint(-10, 10)
s.dy = random.randint(-10, 10)
sprites.append(s)
view = FlatView.from_window(w, sprites=sprites)
view.fx, view.fy = w.width/2, w.height/2
t = 0
numframes = 0
while 1:
if w.has_exit:
print 'FPS:', clock.get_fps()
print 'us per sprite:', float(t) / (numsprites * numframes) * 1000000
break
t += clock.tick()
w.dispatch_events()
for s in sprites: s.update()
view.clear()
view.draw()
w.flip()
numframes += 1
w.close()
|
pitatensai/you-get
|
refs/heads/develop
|
src/you_get/extractors/vidto.py
|
17
|
#!/usr/bin/env python
__all__ = ['vidto_download']
from ..common import *
import pdb
import time
def vidto_download(url, output_dir='.', merge=True, info_only=False):
html = get_content(url)
params = {}
r = re.findall(
r'type="(?:hidden|submit)?"(?:.*?)name="(.+?)"\s* value="?(.+?)">', html)
for name, value in r:
params[name] = value
data = parse.urlencode(params).encode('utf-8')
req = request.Request(url)
print("Please wait for 6 seconds...")
time.sleep(6)
print("Starting")
new_html = request.urlopen(req, data).read().decode('utf-8', 'replace')
new_stff = re.search('lnk_download" href="(.*?)">', new_html)
if(new_stff):
url = new_stff.group(1)
title = params['fname']
type = ""
ext = ""
a, b, size = url_info(url)
print_info(site_info, title, type, size)
if not info_only:
download_urls([url], title, ext, size, output_dir, merge=merge)
else:
print("cannot find link, please review")
pdb.set_trace()
site_info = "vidto.me"
download = vidto_download
download_playlist = playlist_not_supported('vidto')
|
ifiddes/pycbio
|
refs/heads/master
|
pycbio/hgdata/psl.py
|
1
|
# Copyright 2006-2012 Mark Diekhans
import copy
from pycbio.hgdata.autoSql import intArraySplit, intArrayJoin, strArraySplit, strArrayJoin
from pycbio.sys import fileOps, dbOps
from pycbio.sys.multiDict import MultiDict
from pycbio.hgdata.rangeFinder import Binner
from Bio.Seq import reverse_complement
# FIXME: Should have factory rather than __init__ multiplexing nonsense **
# FIXME: should have builder functions
def rcStrand(s):
"return reverse-complement of a strand character"
return "+" if (s == "-") else "-"
class PslBlock(object):
"""Block of a PSL"""
__slots__ = ("psl", "iBlk", "qStart", "qEnd", "tStart", "tEnd", "size", "qSeq", "tSeq")
def __init__(self, psl, qStart, tStart, size, qSeq=None, tSeq=None):
"sets iBlk base on being added in ascending order"
self.psl = psl
self.iBlk = len(psl.blocks)
self.qStart = qStart
self.qEnd = qStart + size
self.tStart = tStart
self.tEnd = tStart + size
self.size = size
self.qSeq = qSeq
self.tSeq = tSeq
def __len__(self):
return self.size
def __str__(self):
return str(self.qStart) + ".." + str(self.qEnd) + " <=> " + str(self.tStart) + ".." + str(self.tEnd)
def getQStartPos(self):
"get qStart for the block on positive strand"
if self.psl.getQStrand() == '+':
return self.qStart
else:
return self.psl.qSize - self.qEnd
def getQEndPos(self):
"get qEnd for the block on positive strand"
if self.psl.getQStrand() == '+':
return self.qEnd
else:
return self.psl.qSize - self.qStart
def getTStartPos(self):
"get tStart for the block on positive strand"
if self.psl.getTStrand() == '+':
return self.tStart
else:
return self.psl.tSize - self.tEnd
def getTEndPos(self):
"get tEnd for the block on positive strand"
if self.psl.getTStrand() == '+':
return self.tEnd
else:
return self.psl.tSize - self.tStart
def sameAlign(self, other):
"compare for equality of alignment."
return (other is not None) and (self.qStart == other.qStart) and (self.tStart == other.tStart) and (self.size == other.size) and (self.qSeq == other.qSeq) and (self.tSeq == other.tSeq)
def reverseComplement(self, newPsl):
"construct a block that is the reverse complement of this block"
return PslBlock(newPsl, self.psl.qSize-self.qEnd, self.psl.tSize-self.tEnd, self.size,
(reverse_complement(self.qSeq) if (self.qSeq is not None) else None),
(reverse_complement(self.tSeq) if (self.tSeq is not None) else None))
def swapSides(self, newPsl):
"construct a block with query and target swapped "
return PslBlock(newPsl, self.tStart, self.qStart, self.size, self.tSeq, self.qSeq)
def swapSidesReverseComplement(self, newPsl):
"construct a block with query and target swapped and reverse complemented "
return PslBlock(newPsl, self.psl.tSize-self.tEnd, self.psl.qSize-self.qEnd, self.size,
(reverse_complement(self.tSeq) if (self.tSeq is not None) else None),
(reverse_complement(self.qSeq) if (self.qSeq is not None) else None))
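# Worked example of the positive-strand accessors above (numbers hypothetical):
# a block with qStart=10 and size=5 (so qEnd=15) in a PSL with qSize=100 and
# query strand '-' reports getQStartPos() = 100 - 15 = 85 and
# getQEndPos() = 100 - 10 = 90, i.e. the same bases in positive-strand coordinates.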
class Psl(object):
"""Object containing data from a PSL record."""
__slots__ = ("match", "misMatch", "repMatch", "nCount", "qNumInsert", "qBaseInsert", "tNumInsert", "tBaseInsert", "strand", "qName", "qSize", "qStart", "qEnd", "tName", "tSize", "tStart", "tEnd", "blockCount", "blocks")
def __parseBlocks(self, blockSizesStr, qStartsStr, tStartsStr, qSeqsStr, tSeqsStr):
"convert parallel arrays to PslBlock objects"
self.blocks = []
blockSizes = intArraySplit(blockSizesStr)
qStarts = intArraySplit(qStartsStr)
tStarts = intArraySplit(tStartsStr)
haveSeqs = (qSeqsStr is not None)
if haveSeqs:
qSeqs = strArraySplit(qSeqsStr)
tSeqs = strArraySplit(tSeqsStr)
for i in xrange(self.blockCount):
self.blocks.append(PslBlock(self, qStarts[i], tStarts[i], blockSizes[i],
(qSeqs[i] if haveSeqs else None),
(tSeqs[i] if haveSeqs else None)))
def __parse(self, row):
self.match = int(row[0])
self.misMatch = int(row[1])
self.repMatch = int(row[2])
self.nCount = int(row[3])
self.qNumInsert = int(row[4])
self.qBaseInsert = int(row[5])
self.tNumInsert = int(row[6])
self.tBaseInsert = int(row[7])
self.strand = row[8]
self.qName = row[9]
self.qSize = int(row[10])
self.qStart = int(row[11])
self.qEnd = int(row[12])
self.tName = row[13]
self.tSize = int(row[14])
self.tStart = int(row[15])
self.tEnd = int(row[16])
self.blockCount = int(row[17])
haveSeqs = len(row) > 21
self.__parseBlocks(row[18], row[19], row[20],
(row[21] if haveSeqs else None),
(row[22] if haveSeqs else None))
def __loadDb(self, row, dbColIdxMap):
# FIXME: change to use DictCursor
self.match = row[dbColIdxMap["matches"]]
self.misMatch = row[dbColIdxMap["misMatches"]]
self.repMatch = row[dbColIdxMap["repMatches"]]
self.nCount = row[dbColIdxMap["nCount"]]
self.qNumInsert = row[dbColIdxMap["qNumInsert"]]
self.qBaseInsert = row[dbColIdxMap["qBaseInsert"]]
self.tNumInsert = row[dbColIdxMap["tNumInsert"]]
self.tBaseInsert = row[dbColIdxMap["tBaseInsert"]]
self.strand = row[dbColIdxMap["strand"]]
self.qName = row[dbColIdxMap["qName"]]
self.qSize = row[dbColIdxMap["qSize"]]
self.qStart = row[dbColIdxMap["qStart"]]
self.qEnd = row[dbColIdxMap["qEnd"]]
self.tName = row[dbColIdxMap["tName"]]
self.tSize = row[dbColIdxMap["tSize"]]
self.tStart = row[dbColIdxMap["tStart"]]
self.tEnd = row[dbColIdxMap["tEnd"]]
self.blockCount = row[dbColIdxMap["blockCount"]]
haveSeqs = "qSeqs" in dbColIdxMap
self.__parseBlocks(row[dbColIdxMap["blockSizes"]], row[dbColIdxMap["qStarts"]], row[dbColIdxMap["tStarts"]],
(row[dbColIdxMap["qSeqs"]] if haveSeqs else None),
(row[dbColIdxMap["tSeqs"]] if haveSeqs else None))
def __empty(self):
self.match = 0
self.misMatch = 0
self.repMatch = 0
self.nCount = 0
self.qNumInsert = 0
self.qBaseInsert = 0
self.tNumInsert = 0
self.tBaseInsert = 0
self.strand = None
self.qName = None
self.qSize = 0
self.qStart = 0
self.qEnd = 0
self.tName = None
self.tSize = 0
self.tStart = 0
self.tEnd = 0
self.blockCount = 0
self.blocks = []
def __init__(self, row=None, dbColIdxMap=None):
"""construct a new PSL, either parsing a row, loading a row from a
dbapi cursor (dbColIdxMap created by sys.dbOpts.cursorColIdxMap), or
creating an empty one."""
if dbColIdxMap is not None:
self.__loadDb(row, dbColIdxMap)
elif row is not None:
self.__parse(row)
else:
self.__empty()
def getQStrand(self):
return self.strand[0]
def getTStrand(self):
return (self.strand[1] if len(self.strand) > 1 else "+")
def qRevRange(self, start, end):
"reverse a query range to the other strand"
return (self.qSize-end, self.qSize-start)
def tRevRange(self, start, end):
"reverse a query range to the other strand"
return (self.tSize-end, self.tSize-start)
def qRangeToPos(self, start, end):
"convert a query range in alignment coordinates to positive strand coordinates"
if self.getQStrand() == "+":
return (start, end)
else:
return (self.qSize-end, self.qSize-start)
def tRangeToPos(self, start, end):
"convert a target range in alignment coordinates to positive strand coordinates"
if self.getTStrand() == "+":
return (start, end)
else:
return (self.tSize-end, self.tSize-start)
    def isProtein(self):
        "does this PSL appear to be a translated (protein) alignment?"
        if len(self.strand) < 2:
            return False
        lastBlk = self.blocks[self.blockCount - 1]
        return (((self.strand[1] == '+')
                 and (self.tEnd == lastBlk.tStart + 3 * lastBlk.size))
                or
                ((self.strand[1] == '-')
                 and (self.tStart == (self.tSize - (lastBlk.tStart + 3 * lastBlk.size)))))
def tOverlap(self, tName, tStart, tEnd):
"test for overlap of target range"
return (tName == self.tName) and (tStart < self.tEnd) and (tEnd > self.tStart)
    def tBlkOverlap(self, tStart, tEnd, iBlk):
        "does the specified block overlap the target range"
        blk = self.blocks[iBlk]
        return (tStart < blk.getTEndPos()) and (tEnd > blk.getTStartPos())
def __str__(self):
"return psl as a tab-separated string"
row = [str(self.match),
str(self.misMatch),
str(self.repMatch),
str(self.nCount),
str(self.qNumInsert),
str(self.qBaseInsert),
str(self.tNumInsert),
str(self.tBaseInsert),
self.strand,
self.qName,
str(self.qSize),
str(self.qStart),
str(self.qEnd),
self.tName,
str(self.tSize),
str(self.tStart),
str(self.tEnd),
str(self.blockCount),
intArrayJoin([b.size for b in self.blocks]),
intArrayJoin([b.qStart for b in self.blocks]),
intArrayJoin([b.tStart for b in self.blocks])]
if self.blocks[0].qSeq is not None:
row.append(strArrayJoin([b.qSeq for b in self.blocks]))
row.append(strArrayJoin([b.tSeq for b in self.blocks]))
return str.join("\t", row)
def write(self, fh):
"""write psl to a tab-seperated file"""
fh.write(str(self))
fh.write('\n')
    @staticmethod
    def queryCmp(psl1, psl2):
        "sort comparison using query address"
        diff = cmp(psl1.qName, psl2.qName)
        if diff == 0:
            diff = psl1.qStart - psl2.qStart
            if diff == 0:
                diff = psl1.qEnd - psl2.qEnd
        return diff
    @staticmethod
    def targetCmp(psl1, psl2):
        "sort comparison using target address"
        diff = cmp(psl1.tName, psl2.tName)
        if diff == 0:
            diff = psl1.tStart - psl2.tStart
            if diff == 0:
                diff = psl1.tEnd - psl2.tEnd
        return diff
def __eq__(self, other):
"compare for equality of alignment"
if ((not isinstance(other, self.__class__))
or (self.match != other.match)
or (self.misMatch != other.misMatch)
or (self.repMatch != other.repMatch)
or (self.nCount != other.nCount)
or (self.qNumInsert != other.qNumInsert)
or (self.qBaseInsert != other.qBaseInsert)
or (self.tNumInsert != other.tNumInsert)
or (self.tBaseInsert != other.tBaseInsert)
or (self.strand != other.strand)
or (self.qName != other.qName)
or (self.qSize != other.qSize)
or (self.qStart != other.qStart)
or (self.qEnd != other.qEnd)
or (self.tName != other.tName)
or (self.tSize != other.tSize)
or (self.tStart != other.tStart)
or (self.tEnd != other.tEnd)
or (self.blockCount != other.blockCount)):
return False
for i in xrange(self.blockCount):
if not self.blocks[i].sameAlign(other.blocks[i]):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def sameAlign(self, other):
"compare for equality of alignment. The stats fields are not compared."
if ((other is None)
or (self.strand != other.strand)
or (self.qName != other.qName)
or (self.qSize != other.qSize)
or (self.qStart != other.qStart)
or (self.qEnd != other.qEnd)
or (self.tName != other.tName)
or (self.tSize != other.tSize)
or (self.tStart != other.tStart)
or (self.tEnd != other.tEnd)
or (self.blockCount != other.blockCount)):
return False
for i in xrange(self.blockCount):
if not self.blocks[i].sameAlign(other.blocks[i]):
return False
return True
def __hash__(self):
return hash(self.tName) + hash(self.tStart)
def identity(self):
aligned = float(self.match + self.misMatch + self.repMatch)
if aligned == 0.0:
return 0.0
else:
return float(self.match + self.repMatch)/aligned
def basesAligned(self):
return self.match + self.misMatch + self.repMatch
def queryAligned(self):
return float(self.match + self.misMatch + self.repMatch)/float(self.qSize)
def reverseComplement(self):
"create a new PSL that is reverse complemented"
rc = Psl(None)
rc.match = self.match
rc.misMatch = self.misMatch
rc.repMatch = self.repMatch
rc.nCount = self.nCount
rc.qNumInsert = self.qNumInsert
rc.qBaseInsert = self.qBaseInsert
rc.tNumInsert = self.tNumInsert
rc.tBaseInsert = self.tBaseInsert
rc.strand = rcStrand(self.getQStrand()) + rcStrand(self.getTStrand())
rc.qName = self.qName
rc.qSize = self.qSize
rc.qStart = self.qStart
rc.qEnd = self.qEnd
rc.tName = self.tName
rc.tSize = self.tSize
rc.tStart = self.tStart
rc.tEnd = self.tEnd
rc.blockCount = self.blockCount
rc.blocks = []
for i in xrange(self.blockCount-1,-1,-1):
rc.blocks.append(self.blocks[i].reverseComplement(rc))
return rc
def __swapStrand(self, rc, keepTStrandImplicit, doRc):
# don't make implicit if already explicit
if keepTStrandImplicit and (len(self.strand) == 1):
qs = rcStrand(self.getTStrand()) if doRc else self.getTStrand()
ts = ""
else:
# swap and make|keep explicit
qs = self.getTStrand()
ts = self.getQStrand()
return qs+ts
def swapSides(self, keepTStrandImplicit=False):
"""Create a new PSL with target and query swapped,
If keepTStrandImplicit is True the psl has an implicit positive target strand, reverse
complement to keep the target strand positive and implicit.
If keepTStrandImplicit is False, don't reverse complement untranslated
alignments to keep target positive strand. This will make the target
strand explicit."""
doRc = (keepTStrandImplicit and (len(self.strand) == 1) and (self.getQStrand() == "-"))
swap = Psl(None)
swap.match = self.match
swap.misMatch = self.misMatch
swap.repMatch = self.repMatch
swap.nCount = self.nCount
swap.qNumInsert = self.tNumInsert
swap.qBaseInsert = self.tBaseInsert
swap.tNumInsert = self.qNumInsert
swap.tBaseInsert = self.qBaseInsert
swap.strand = self.__swapStrand(swap, keepTStrandImplicit, doRc)
swap.qName = self.tName
swap.qSize = self.tSize
swap.qStart = self.tStart
swap.qEnd = self.tEnd
swap.tName = self.qName
swap.tSize = self.qSize
swap.tStart = self.qStart
swap.tEnd = self.qEnd
swap.blockCount = self.blockCount
swap.blocks = []
if doRc:
for i in xrange(self.blockCount-1,-1,-1):
swap.blocks.append(self.blocks[i].swapSidesReverseComplement(swap))
else:
for i in xrange(self.blockCount):
swap.blocks.append(self.blocks[i].swapSides(swap))
return swap
class PslReader(object):
"""Read PSLs from a tab file"""
def __init__(self, fileName):
self.fh = None # required for __del__ if open fails
self.fh = fileOps.opengz(fileName)
def __del__(self):
if self.fh is not None:
self.fh.close()
def __iter__(self):
return self
def next(self):
"read next PSL"
while True:
line = self.fh.readline()
if (line == ""):
                self.fh.close()
self.fh = None
raise StopIteration
if not ((len(line) == 1) or line.startswith('#')):
line = line[0:-1] # drop newline
return Psl(line.split("\t"))
class PslDbReader(object):
"""Read PSLs from db query. Factory methods are provide
to generate instances for range queries."""
pslColumns = ("matches", "misMatches", "repMatches", "nCount", "qNumInsert", "qBaseInsert", "tNumInsert", "tBaseInsert", "strand", "qName", "qSize", "qStart", "qEnd", "tName", "tSize", "tStart", "tEnd", "blockCount", "blockSizes", "qStarts", "tStarts")
pslSeqColumns = ("qSequence", "tSequence")
def __init__(self, conn, query):
self.cur = conn.cursor()
try:
self.cur.execute(query)
except:
try:
self.close()
except:
pass
raise # continue original exception
# FIXME: could make this optional or require column names in query
self.colIdxMap = dbOps.cursorColIdxMap(self.cur)
def close(self):
if self.cur is not None:
self.cur.close()
self.cur = None
def __del__(self):
self.close()
def __iter__(self):
return self
def next(self):
"read next PSL"
while True:
row = self.cur.fetchone()
if row is None:
self.cur.close()
self.cur = None
raise StopIteration
return Psl(row, dbColIdxMap=self.colIdxMap)
@staticmethod
def targetRangeQuery(conn, table, tName, tStart, tEnd, haveSeqs=False):
""" factor to generate PslDbReader for querying a target range. Must have a bin column"""
query = "select " + ",".join(PslDbReader.pslColumns)
if haveSeqs:
query += "," + ",".join(PslDbReader.pslSeqColumns)
query += " from " + table + " where " \
+ Binner.getOverlappingSqlExpr("tName", "bin", "tStart", "tEnd", tName, tStart, tEnd)
return PslDbReader(conn, query)
class PslTbl(list):
"""Table of PSL objects loaded from a tab-file
"""
def __mkQNameIdx(self):
self.qNameMap = MultiDict()
for psl in self:
self.qNameMap.add(psl.qName, psl)
def __mkTNameIdx(self):
self.tNameMap = MultiDict()
for psl in self:
self.tNameMap.add(psl.tName, psl)
def __init__(self, fileName, qNameIdx=False, tNameIdx=False):
for psl in PslReader(fileName):
self.append(psl)
self.qNameMap = self.tNameMap = None
if qNameIdx:
self.__mkQNameIdx()
if tNameIdx:
self.__mkTNameIdx()
def getQNameIter(self):
return self.qNameMap.iterkeys()
def haveQName(self, qName):
return (self.qNameMap.get(qName) is not None)
def getByQName(self, qName):
"""generator to get all PSL with a give qName"""
ent = self.qNameMap.get(qName)
if ent is not None:
if isinstance(ent, list):
for psl in ent:
yield psl
else:
yield ent
def getTNameIter(self):
return self.tNameMap.iterkeys()
    def haveTName(self, tName):
        return (self.tNameMap.get(tName) is not None)
def getByTName(self, tName):
"""generator to get all PSL with a give tName"""
ent = self.tNameMap.get(tName)
if ent is not None:
if isinstance(ent, list):
for psl in ent:
yield psl
else:
yield ent
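# Usage sketch (file and query names hypothetical). PslReader opens plain or
# gzipped files via fileOps.opengz and yields one Psl per data line; PslTbl
# loads a whole file into a list, optionally indexed by query and/or target name:
#   for psl in PslReader("alignments.psl.gz"):
#       if psl.identity() >= 0.95:
#           print psl.qName, psl.tName, psl.basesAligned()
#   tbl = PslTbl("alignments.psl.gz", qNameIdx=True)
#   for psl in tbl.getByQName("myQuery"):
#       print str(psl)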
|
henriquemiranda/yambopy
|
refs/heads/master
|
tutorial/bn/gs_bn.py
|
4
|
#
# Author: Henrique Pereira Coutada Miranda
# Run a boron nitride ground-state calculation using Quantum Espresso
#
from __future__ import print_function, division
import os
import sys
from math import sqrt
from qepy import *
import argparse
from schedulerpy import *
kpoints = [12,12,1]
kpoints_double = [24,24,1]
qpoints = [3,3,1]
layer_separation = 12
pw = 'pw.x'
q2r = 'q2r.x'
matdyn = 'matdyn.x'
prefix = 'bn'
npoints = 10
p = Path([ [[0.0, 0.0, 0.0],'G'],
[[0.5, 0.0, 0.0],'M'],
[[1./3,1./3,0.0],'K'],
[[0.0, 0.0, 0.0],'G']], [int(npoints*2),int(npoints),int(sqrt(5)*npoints)])
# scheduler
scheduler = Scheduler.factory
# create the input files
def get_inputfile():
""" Define a Quantum espresso input file for boron nitride
"""
qe = PwIn()
qe.atoms = [['N',[0.0,0.0,0.5]],
['B',[1/3,2/3,0.5]]]
qe.atypes = {'B': [10.811, "B.pbe-mt_fhi.UPF"],
'N': [14.0067,"N.pbe-mt_fhi.UPF"]}
qe.control['prefix'] = "'%s'"%prefix
qe.control['verbosity'] = "'high'"
qe.control['wf_collect'] = '.true.'
qe.control['pseudo_dir'] = "'../pseudos/'"
qe.system['celldm(1)'] = 4.7
qe.system['celldm(3)'] = layer_separation/qe.system['celldm(1)']
qe.system['ecutwfc'] = 60
qe.system['occupations'] = "'fixed'"
qe.system['nat'] = 2
qe.system['ntyp'] = 2
qe.system['ibrav'] = 4
qe.kpoints = [9, 9, 1]
qe.electrons['conv_thr'] = 1e-10
return qe
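# Note on the cell set up above: celldm(1) is the in-plane lattice constant a
# in bohr and celldm(3) is the c/a ratio, so the spacing along c works out to
# celldm(3) * celldm(1) = layer_separation = 12 bohr between the periodic BN layers.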
#relax
def relax():
if not os.path.isdir('relax'):
os.mkdir('relax')
qe = get_inputfile()
qe.control['calculation'] = "'vc-relax'"
qe.ions['ion_dynamics'] = "'bfgs'"
qe.cell['cell_dynamics'] = "'bfgs'"
qe.cell['cell_dofree'] = "'2Dxy'"
qe.write('relax/%s.scf'%prefix)
#scf
def scf(folder='scf'):
if not os.path.isdir(folder):
os.mkdir(folder)
qe = get_inputfile()
qe.control['calculation'] = "'scf'"
qe.write('%s/%s.scf'%(folder,prefix))
#nscf
def nscf(kpoints,folder='nscf'):
if not os.path.isdir(folder):
os.mkdir(folder)
qe = get_inputfile()
qe.control['calculation'] = "'nscf'"
qe.electrons['diago_full_acc'] = ".true."
qe.electrons['conv_thr'] = 1e-8
qe.system['nbnd'] = 60
qe.system['force_symmorphic'] = ".true."
qe.kpoints = kpoints
qe.write('%s/%s.nscf'%(folder,prefix))
#bands
def bands():
if not os.path.isdir('bands'):
os.mkdir('bands')
qe = get_inputfile()
qe.control['calculation'] = "'bands'"
qe.electrons['diago_full_acc'] = ".true."
qe.electrons['conv_thr'] = 1e-6
qe.system['nbnd'] = 6
qe.system['force_symmorphic'] = ".true."
qe.ktype = 'crystal'
qe.set_path(p)
qe.write('bands/%s.bands'%prefix)
def phonon(kpoints,qpoints,folder='phonon'):
if not os.path.isdir(folder):
os.mkdir(folder)
ph = PhIn()
ph['nq1'],ph['nq2'],ph['nq3'] = qpoints
ph['tr2_ph'] = 1e-8
ph['prefix'] = "'%s'"%prefix
ph['epsil'] = ".false."
ph['trans'] = ".true."
ph['fildyn'] = "'%s.dyn'"%prefix
ph['fildrho'] = "'%s.drho'"%prefix
ph['ldisp'] = ".true."
ph.write('%s/%s.ph'%(folder,prefix))
md = DynmatIn()
md['asr'] = "'simple'"
md['fildyn'] = "'%s.dyn1'"%prefix
md['filout'] = "'%s.modes'"%prefix
md.write('%s/%s.dynmat'%(folder,prefix))
def update_positions(pathin,pathout):
""" update the positions of the atoms in the scf file using the output of the relaxation loop
"""
e = PwXML(prefix,path=pathin)
pos = e.get_scaled_positions()
#open relaxed cell
qin = PwIn('%s/%s.scf'%(pathin,prefix))
#open scf file
qout = PwIn('%s/%s.scf'%(pathout,prefix))
#update positions on scf file
print("old celldm(1)", qin.system['celldm(1)'])
qout.system['celldm(1)'] = e.cell[0][0]
print("new celldm(1)", qout.system['celldm(1)'])
qout.atoms = zip([a[0] for a in qin.atoms],pos)
#write scf
qout.write('%s/%s.scf'%(pathout,prefix))
def run_plot():
print("running plotting:")
xml = PwXML(prefix=prefix,path='bands')
xml.plot_eigen(p)
def run_bands(nthreads=1):
print("running bands:")
qe_run = scheduler()
qe_run.add_command("cp -r scf/%s.save bands/"%prefix)
qe_run.add_command("cd bands; mpirun -np %d %s -inp %s.bands -nk %d > bands.log"%(nthreads,pw,prefix,nthreads))
qe_run.run()
qe_run.clean()
print("done!")
if __name__ == "__main__":
#parse options
parser = argparse.ArgumentParser(description='Test the yambopy script.')
parser.add_argument('-r' ,'--relax', action="store_true", help='Structural relaxation')
parser.add_argument('-s' ,'--scf', action="store_true", help='Self-consistent calculation')
parser.add_argument('-n' ,'--nscf', action="store_true", help='Non-self consistent calculation')
parser.add_argument('-n2','--nscf_double', action="store_true", help='Non-self consistent calculation for the double grid')
parser.add_argument('-b' ,'--bands', action="store_true", help='Calculate band-structure')
parser.add_argument('-p' ,'--phonon', action="store_true", help='Phonon calculation')
parser.add_argument('-d' ,'--dispersion', action="store_true", help='Phonon dispersion')
parser.add_argument('-t' ,'--nthreads', help='Number of threads', default=2 )
args = parser.parse_args()
nthreads = int(args.nthreads)
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
# create input files and folders
relax()
scf()
nscf(kpoints)
nscf(kpoints_double, folder='nscf_double')
bands()
phonon(kpoints,qpoints)
if args.relax:
print("running relax:")
qe_run = scheduler()
qe_run.add_command("cd relax; %s -inp %s.scf > relax.log"%(pw,prefix)) #relax
qe_run.run()
update_positions('relax','scf')
print("done!")
if args.scf:
print("running scf:")
qe_run = scheduler()
qe_run.add_command("cd scf; mpirun -np %d %s -inp %s.scf > scf.log"%(nthreads,pw,prefix)) #scf
qe_run.run()
print("done!")
if args.nscf:
print("running nscf:")
qe_run = scheduler()
qe_run.add_command("cp -r scf/%s.save nscf/"%prefix) #nscf
qe_run.add_command("cd nscf; mpirun -np %d %s -nk %d -inp %s.nscf > nscf.log"%(nthreads,pw,nthreads,prefix)) #nscf
qe_run.run()
print("done!")
if args.nscf_double:
print("running nscf_double:")
qe_run = scheduler()
qe_run.add_command("cp -r scf/%s.save nscf_double/"%prefix) #nscf
qe_run.add_command("cd nscf_double; mpirun -np %d %s -inp %s.nscf > nscf_double.log"%(nthreads,pw,prefix)) #nscf
qe_run.run()
print("done!")
if args.phonon:
print("running phonon:")
qe_run = scheduler()
qe_run.add_command("cp -r scf/%s.save phonon/"%prefix)
qe_run.add_command("cd phonon; mpirun -np %d ph.x -inp %s.ph > phonon.log"%(nthreads,prefix)) #phonon
qe_run.add_command("dynmat.x < %s.dynmat > dynmat.log"%prefix) #matdyn
qe_run.run()
print("done!")
if args.dispersion:
qe_run = scheduler()
#q2r
disp = DynmatIn()
disp['fildyn']= "'%s.dyn'" % prefix
disp['zasr'] = "'simple'"
disp['flfrc'] = "'%s.fc'" % prefix
disp.write('phonon/q2r.in')
qe_run.add_command('cd phonon; %s < q2r.in'%q2r)
#dynmat
dyn = DynmatIn()
dyn['flfrc'] = "'%s.fc'" % prefix
dyn['asr'] = "'simple'"
dyn['flfrq'] = "'%s.freq'" % prefix
dyn['q_in_cryst_coord'] = '.true.'
dyn.qpoints = p.get_klist()
dyn.write('phonon/matdyn.in')
qe_run.add_command('%s < matdyn.in'%matdyn)
qe_run.run()
# matdyn class to read and plot the frequencies
m = Matdyn(natoms=2,path=p,folder='phonon')
m.plot_eigen()
if args.bands:
run_bands(nthreads)
run_plot()
|
acroreiser/kernel_samsung_msm
|
refs/heads/master
|
tools/perf/scripts/python/syscall-counts-by-pid.py
|
11180
|
# system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
|
cloudfoundry/php-buildpack-legacy
|
refs/heads/master
|
builds/runtimes/python-2.7.6/lib/python2.7/test/test_wsgiref.py
|
51
|
from __future__ import nested_scopes # Backward compat for 2.1
from unittest import TestCase
from wsgiref.util import setup_testing_defaults
from wsgiref.headers import Headers
from wsgiref.handlers import BaseHandler, BaseCGIHandler
from wsgiref import util
from wsgiref.validate import validator
from wsgiref.simple_server import WSGIServer, WSGIRequestHandler, demo_app
from wsgiref.simple_server import make_server
from StringIO import StringIO
from SocketServer import BaseServer
import os
import re
import sys
from test import test_support
class MockServer(WSGIServer):
"""Non-socket HTTP server"""
def __init__(self, server_address, RequestHandlerClass):
BaseServer.__init__(self, server_address, RequestHandlerClass)
self.server_bind()
def server_bind(self):
host, port = self.server_address
self.server_name = host
self.server_port = port
self.setup_environ()
class MockHandler(WSGIRequestHandler):
"""Non-socket HTTP handler"""
def setup(self):
self.connection = self.request
self.rfile, self.wfile = self.connection
def finish(self):
pass
def hello_app(environ,start_response):
start_response("200 OK", [
('Content-Type','text/plain'),
('Date','Mon, 05 Jun 2006 18:49:54 GMT')
])
return ["Hello, world!"]
def run_amock(app=hello_app, data="GET / HTTP/1.0\n\n"):
server = make_server("", 80, app, MockServer, MockHandler)
inp, out, err, olderr = StringIO(data), StringIO(), StringIO(), sys.stderr
sys.stderr = err
try:
server.finish_request((inp,out), ("127.0.0.1",8888))
finally:
sys.stderr = olderr
return out.getvalue(), err.getvalue()
def compare_generic_iter(make_it,match):
"""Utility to compare a generic 2.1/2.2+ iterator with an iterable
If running under Python 2.2+, this tests the iterator using iter()/next(),
as well as __getitem__. 'make_it' must be a function returning a fresh
iterator to be tested (since this may test the iterator twice)."""
it = make_it()
n = 0
for item in match:
if not it[n]==item: raise AssertionError
n+=1
try:
it[n]
except IndexError:
pass
else:
raise AssertionError("Too many items from __getitem__",it)
try:
iter, StopIteration
except NameError:
pass
else:
# Only test iter mode under 2.2+
it = make_it()
if not iter(it) is it: raise AssertionError
for item in match:
if not it.next()==item: raise AssertionError
try:
it.next()
except StopIteration:
pass
else:
raise AssertionError("Too many items from .next()",it)
class IntegrationTests(TestCase):
def check_hello(self, out, has_length=True):
self.assertEqual(out,
"HTTP/1.0 200 OK\r\n"
"Server: WSGIServer/0.1 Python/"+sys.version.split()[0]+"\r\n"
"Content-Type: text/plain\r\n"
"Date: Mon, 05 Jun 2006 18:49:54 GMT\r\n" +
(has_length and "Content-Length: 13\r\n" or "") +
"\r\n"
"Hello, world!"
)
def test_plain_hello(self):
out, err = run_amock()
self.check_hello(out)
def test_validated_hello(self):
out, err = run_amock(validator(hello_app))
# the middleware doesn't support len(), so content-length isn't there
self.check_hello(out, has_length=False)
def test_simple_validation_error(self):
def bad_app(environ,start_response):
start_response("200 OK", ('Content-Type','text/plain'))
return ["Hello, world!"]
out, err = run_amock(validator(bad_app))
self.assertTrue(out.endswith(
"A server error occurred. Please contact the administrator."
))
self.assertEqual(
err.splitlines()[-2],
"AssertionError: Headers (('Content-Type', 'text/plain')) must"
" be of type list: <type 'tuple'>"
)
class UtilityTests(TestCase):
def checkShift(self,sn_in,pi_in,part,sn_out,pi_out):
env = {'SCRIPT_NAME':sn_in,'PATH_INFO':pi_in}
util.setup_testing_defaults(env)
self.assertEqual(util.shift_path_info(env),part)
self.assertEqual(env['PATH_INFO'],pi_out)
self.assertEqual(env['SCRIPT_NAME'],sn_out)
return env
def checkDefault(self, key, value, alt=None):
# Check defaulting when empty
env = {}
util.setup_testing_defaults(env)
if isinstance(value, StringIO):
self.assertIsInstance(env[key], StringIO)
else:
self.assertEqual(env[key], value)
# Check existing value
env = {key:alt}
util.setup_testing_defaults(env)
self.assertTrue(env[key] is alt)
def checkCrossDefault(self,key,value,**kw):
util.setup_testing_defaults(kw)
self.assertEqual(kw[key],value)
def checkAppURI(self,uri,**kw):
util.setup_testing_defaults(kw)
self.assertEqual(util.application_uri(kw),uri)
def checkReqURI(self,uri,query=1,**kw):
util.setup_testing_defaults(kw)
self.assertEqual(util.request_uri(kw,query),uri)
def checkFW(self,text,size,match):
def make_it(text=text,size=size):
return util.FileWrapper(StringIO(text),size)
compare_generic_iter(make_it,match)
it = make_it()
self.assertFalse(it.filelike.closed)
for item in it:
pass
self.assertFalse(it.filelike.closed)
it.close()
self.assertTrue(it.filelike.closed)
def testSimpleShifts(self):
self.checkShift('','/', '', '/', '')
self.checkShift('','/x', 'x', '/x', '')
self.checkShift('/','', None, '/', '')
self.checkShift('/a','/x/y', 'x', '/a/x', '/y')
self.checkShift('/a','/x/', 'x', '/a/x', '/')
def testNormalizedShifts(self):
self.checkShift('/a/b', '/../y', '..', '/a', '/y')
self.checkShift('', '/../y', '..', '', '/y')
self.checkShift('/a/b', '//y', 'y', '/a/b/y', '')
self.checkShift('/a/b', '//y/', 'y', '/a/b/y', '/')
self.checkShift('/a/b', '/./y', 'y', '/a/b/y', '')
self.checkShift('/a/b', '/./y/', 'y', '/a/b/y', '/')
self.checkShift('/a/b', '///./..//y/.//', '..', '/a', '/y/')
self.checkShift('/a/b', '///', '', '/a/b/', '')
self.checkShift('/a/b', '/.//', '', '/a/b/', '')
self.checkShift('/a/b', '/x//', 'x', '/a/b/x', '/')
self.checkShift('/a/b', '/.', None, '/a/b', '')
def testDefaults(self):
for key, value in [
('SERVER_NAME','127.0.0.1'),
('SERVER_PORT', '80'),
('SERVER_PROTOCOL','HTTP/1.0'),
('HTTP_HOST','127.0.0.1'),
('REQUEST_METHOD','GET'),
('SCRIPT_NAME',''),
('PATH_INFO','/'),
('wsgi.version', (1,0)),
('wsgi.run_once', 0),
('wsgi.multithread', 0),
('wsgi.multiprocess', 0),
('wsgi.input', StringIO("")),
('wsgi.errors', StringIO()),
('wsgi.url_scheme','http'),
]:
self.checkDefault(key,value)
def testCrossDefaults(self):
self.checkCrossDefault('HTTP_HOST',"foo.bar",SERVER_NAME="foo.bar")
self.checkCrossDefault('wsgi.url_scheme',"https",HTTPS="on")
self.checkCrossDefault('wsgi.url_scheme',"https",HTTPS="1")
self.checkCrossDefault('wsgi.url_scheme',"https",HTTPS="yes")
self.checkCrossDefault('wsgi.url_scheme',"http",HTTPS="foo")
self.checkCrossDefault('SERVER_PORT',"80",HTTPS="foo")
self.checkCrossDefault('SERVER_PORT',"443",HTTPS="on")
def testGuessScheme(self):
self.assertEqual(util.guess_scheme({}), "http")
self.assertEqual(util.guess_scheme({'HTTPS':"foo"}), "http")
self.assertEqual(util.guess_scheme({'HTTPS':"on"}), "https")
self.assertEqual(util.guess_scheme({'HTTPS':"yes"}), "https")
self.assertEqual(util.guess_scheme({'HTTPS':"1"}), "https")
def testAppURIs(self):
self.checkAppURI("http://127.0.0.1/")
self.checkAppURI("http://127.0.0.1/spam", SCRIPT_NAME="/spam")
self.checkAppURI("http://spam.example.com:2071/",
HTTP_HOST="spam.example.com:2071", SERVER_PORT="2071")
self.checkAppURI("http://spam.example.com/",
SERVER_NAME="spam.example.com")
self.checkAppURI("http://127.0.0.1/",
HTTP_HOST="127.0.0.1", SERVER_NAME="spam.example.com")
self.checkAppURI("https://127.0.0.1/", HTTPS="on")
self.checkAppURI("http://127.0.0.1:8000/", SERVER_PORT="8000",
HTTP_HOST=None)
def testReqURIs(self):
self.checkReqURI("http://127.0.0.1/")
self.checkReqURI("http://127.0.0.1/spam", SCRIPT_NAME="/spam")
self.checkReqURI("http://127.0.0.1/spammity/spam",
SCRIPT_NAME="/spammity", PATH_INFO="/spam")
self.checkReqURI("http://127.0.0.1/spammity/spam;ham",
SCRIPT_NAME="/spammity", PATH_INFO="/spam;ham")
self.checkReqURI("http://127.0.0.1/spammity/spam;cookie=1234,5678",
SCRIPT_NAME="/spammity", PATH_INFO="/spam;cookie=1234,5678")
self.checkReqURI("http://127.0.0.1/spammity/spam?say=ni",
SCRIPT_NAME="/spammity", PATH_INFO="/spam",QUERY_STRING="say=ni")
self.checkReqURI("http://127.0.0.1/spammity/spam", 0,
SCRIPT_NAME="/spammity", PATH_INFO="/spam",QUERY_STRING="say=ni")
def testFileWrapper(self):
self.checkFW("xyz"*50, 120, ["xyz"*40,"xyz"*10])
def testHopByHop(self):
for hop in (
"Connection Keep-Alive Proxy-Authenticate Proxy-Authorization "
"TE Trailers Transfer-Encoding Upgrade"
).split():
for alt in hop, hop.title(), hop.upper(), hop.lower():
self.assertTrue(util.is_hop_by_hop(alt))
# Not comprehensive, just a few random header names
for hop in (
"Accept Cache-Control Date Pragma Trailer Via Warning"
).split():
for alt in hop, hop.title(), hop.upper(), hop.lower():
self.assertFalse(util.is_hop_by_hop(alt))
class HeaderTests(TestCase):
def testMappingInterface(self):
test = [('x','y')]
self.assertEqual(len(Headers([])),0)
self.assertEqual(len(Headers(test[:])),1)
self.assertEqual(Headers(test[:]).keys(), ['x'])
self.assertEqual(Headers(test[:]).values(), ['y'])
self.assertEqual(Headers(test[:]).items(), test)
self.assertFalse(Headers(test).items() is test) # must be copy!
h=Headers([])
del h['foo'] # should not raise an error
h['Foo'] = 'bar'
for m in h.has_key, h.__contains__, h.get, h.get_all, h.__getitem__:
self.assertTrue(m('foo'))
self.assertTrue(m('Foo'))
self.assertTrue(m('FOO'))
self.assertFalse(m('bar'))
self.assertEqual(h['foo'],'bar')
h['foo'] = 'baz'
self.assertEqual(h['FOO'],'baz')
self.assertEqual(h.get_all('foo'),['baz'])
self.assertEqual(h.get("foo","whee"), "baz")
self.assertEqual(h.get("zoo","whee"), "whee")
self.assertEqual(h.setdefault("foo","whee"), "baz")
self.assertEqual(h.setdefault("zoo","whee"), "whee")
self.assertEqual(h["foo"],"baz")
self.assertEqual(h["zoo"],"whee")
def testRequireList(self):
self.assertRaises(TypeError, Headers, "foo")
def testExtras(self):
h = Headers([])
self.assertEqual(str(h),'\r\n')
h.add_header('foo','bar',baz="spam")
self.assertEqual(h['foo'], 'bar; baz="spam"')
self.assertEqual(str(h),'foo: bar; baz="spam"\r\n\r\n')
h.add_header('Foo','bar',cheese=None)
self.assertEqual(h.get_all('foo'),
['bar; baz="spam"', 'bar; cheese'])
self.assertEqual(str(h),
'foo: bar; baz="spam"\r\n'
'Foo: bar; cheese\r\n'
'\r\n'
)
class ErrorHandler(BaseCGIHandler):
"""Simple handler subclass for testing BaseHandler"""
# BaseHandler records the OS environment at import time, but envvars
# might have been changed later by other tests, which trips up
# HandlerTests.testEnviron().
os_environ = dict(os.environ.items())
def __init__(self,**kw):
setup_testing_defaults(kw)
BaseCGIHandler.__init__(
self, StringIO(''), StringIO(), StringIO(), kw,
multithread=True, multiprocess=True
)
class TestHandler(ErrorHandler):
"""Simple handler subclass for testing BaseHandler, w/error passthru"""
def handle_error(self):
raise # for testing, we want to see what's happening
class HandlerTests(TestCase):
def checkEnvironAttrs(self, handler):
env = handler.environ
for attr in [
'version','multithread','multiprocess','run_once','file_wrapper'
]:
if attr=='file_wrapper' and handler.wsgi_file_wrapper is None:
continue
self.assertEqual(getattr(handler,'wsgi_'+attr),env['wsgi.'+attr])
def checkOSEnviron(self,handler):
empty = {}; setup_testing_defaults(empty)
env = handler.environ
from os import environ
for k,v in environ.items():
if k not in empty:
self.assertEqual(env[k],v)
for k,v in empty.items():
self.assertIn(k, env)
def testEnviron(self):
h = TestHandler(X="Y")
h.setup_environ()
self.checkEnvironAttrs(h)
self.checkOSEnviron(h)
self.assertEqual(h.environ["X"],"Y")
def testCGIEnviron(self):
h = BaseCGIHandler(None,None,None,{})
h.setup_environ()
for key in 'wsgi.url_scheme', 'wsgi.input', 'wsgi.errors':
self.assertIn(key, h.environ)
def testScheme(self):
h=TestHandler(HTTPS="on"); h.setup_environ()
self.assertEqual(h.environ['wsgi.url_scheme'],'https')
h=TestHandler(); h.setup_environ()
self.assertEqual(h.environ['wsgi.url_scheme'],'http')
def testAbstractMethods(self):
h = BaseHandler()
for name in [
'_flush','get_stdin','get_stderr','add_cgi_vars'
]:
self.assertRaises(NotImplementedError, getattr(h,name))
self.assertRaises(NotImplementedError, h._write, "test")
def testContentLength(self):
# Demo one reason iteration is better than write()... ;)
def trivial_app1(e,s):
s('200 OK',[])
return [e['wsgi.url_scheme']]
def trivial_app2(e,s):
s('200 OK',[])(e['wsgi.url_scheme'])
return []
def trivial_app4(e,s):
# Simulate a response to a HEAD request
s('200 OK',[('Content-Length', '12345')])
return []
h = TestHandler()
h.run(trivial_app1)
self.assertEqual(h.stdout.getvalue(),
"Status: 200 OK\r\n"
"Content-Length: 4\r\n"
"\r\n"
"http")
h = TestHandler()
h.run(trivial_app2)
self.assertEqual(h.stdout.getvalue(),
"Status: 200 OK\r\n"
"\r\n"
"http")
h = TestHandler()
h.run(trivial_app4)
self.assertEqual(h.stdout.getvalue(),
b'Status: 200 OK\r\n'
b'Content-Length: 12345\r\n'
b'\r\n')
def testBasicErrorOutput(self):
def non_error_app(e,s):
s('200 OK',[])
return []
def error_app(e,s):
raise AssertionError("This should be caught by handler")
h = ErrorHandler()
h.run(non_error_app)
self.assertEqual(h.stdout.getvalue(),
"Status: 200 OK\r\n"
"Content-Length: 0\r\n"
"\r\n")
self.assertEqual(h.stderr.getvalue(),"")
h = ErrorHandler()
h.run(error_app)
self.assertEqual(h.stdout.getvalue(),
"Status: %s\r\n"
"Content-Type: text/plain\r\n"
"Content-Length: %d\r\n"
"\r\n%s" % (h.error_status,len(h.error_body),h.error_body))
self.assertNotEqual(h.stderr.getvalue().find("AssertionError"), -1)
def testErrorAfterOutput(self):
MSG = "Some output has been sent"
def error_app(e,s):
s("200 OK",[])(MSG)
raise AssertionError("This should be caught by handler")
h = ErrorHandler()
h.run(error_app)
self.assertEqual(h.stdout.getvalue(),
"Status: 200 OK\r\n"
"\r\n"+MSG)
self.assertNotEqual(h.stderr.getvalue().find("AssertionError"), -1)
def testHeaderFormats(self):
def non_error_app(e,s):
s('200 OK',[])
return []
stdpat = (
r"HTTP/%s 200 OK\r\n"
r"Date: \w{3}, [ 0123]\d \w{3} \d{4} \d\d:\d\d:\d\d GMT\r\n"
r"%s" r"Content-Length: 0\r\n" r"\r\n"
)
shortpat = (
"Status: 200 OK\r\n" "Content-Length: 0\r\n" "\r\n"
)
for ssw in "FooBar/1.0", None:
sw = ssw and "Server: %s\r\n" % ssw or ""
for version in "1.0", "1.1":
for proto in "HTTP/0.9", "HTTP/1.0", "HTTP/1.1":
h = TestHandler(SERVER_PROTOCOL=proto)
h.origin_server = False
h.http_version = version
h.server_software = ssw
h.run(non_error_app)
self.assertEqual(shortpat,h.stdout.getvalue())
h = TestHandler(SERVER_PROTOCOL=proto)
h.origin_server = True
h.http_version = version
h.server_software = ssw
h.run(non_error_app)
if proto=="HTTP/0.9":
self.assertEqual(h.stdout.getvalue(),"")
else:
self.assertTrue(
re.match(stdpat%(version,sw), h.stdout.getvalue()),
(stdpat%(version,sw), h.stdout.getvalue())
)
def testCloseOnError(self):
side_effects = {'close_called': False}
MSG = b"Some output has been sent"
def error_app(e,s):
s("200 OK",[])(MSG)
class CrashyIterable(object):
def __iter__(self):
while True:
yield b'blah'
raise AssertionError("This should be caught by handler")
def close(self):
side_effects['close_called'] = True
return CrashyIterable()
h = ErrorHandler()
h.run(error_app)
self.assertEqual(side_effects['close_called'], True)
def test_main():
test_support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
|
lakshayg/tensorflow
|
refs/heads/master
|
tensorflow/contrib/learn/python/learn/estimators/rnn_common.py
|
33
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common operations for RNN Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import metrics
from tensorflow.contrib import rnn as contrib_rnn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# NOTE(jtbates): As of February 10, 2017, some of the `RNNKeys` have been
# removed and replaced with values from `prediction_key.PredictionKey`. The key
# `RNNKeys.PREDICTIONS_KEY` has been replaced by
# `prediction_key.PredictionKey.SCORES` for regression and
# `prediction_key.PredictionKey.CLASSES` for classification. The key
# `RNNKeys.PROBABILITIES_KEY` has been replaced by
# `prediction_key.PredictionKey.PROBABILITIES`.
class RNNKeys(object):
FINAL_STATE_KEY = 'final_state'
LABELS_KEY = '__labels__'
SEQUENCE_LENGTH_KEY = 'sequence_length'
STATE_PREFIX = 'rnn_cell_state'
class PredictionType(object):
"""Enum-like values for the type of prediction that the model makes.
"""
SINGLE_VALUE = 1
MULTIPLE_VALUE = 2
_CELL_TYPES = {'basic_rnn': contrib_rnn.BasicRNNCell,
'lstm': contrib_rnn.LSTMCell,
'gru': contrib_rnn.GRUCell,}
def _get_single_cell(cell_type, num_units):
"""Constructs and return a single `RNNCell`.
Args:
cell_type: Either a string identifying the `RNNCell` type or a subclass of
`RNNCell`.
num_units: The number of units in the `RNNCell`.
Returns:
An initialized `RNNCell`.
Raises:
ValueError: `cell_type` is an invalid `RNNCell` name.
TypeError: `cell_type` is not a string or a subclass of `RNNCell`.
"""
cell_type = _CELL_TYPES.get(cell_type, cell_type)
if not cell_type or not issubclass(cell_type, contrib_rnn.RNNCell):
raise ValueError('The supported cell types are {}; got {}'.format(
list(_CELL_TYPES.keys()), cell_type))
return cell_type(num_units=num_units)
def construct_rnn_cell(num_units, cell_type='basic_rnn',
dropout_keep_probabilities=None):
"""Constructs cells, applies dropout and assembles a `MultiRNNCell`.
The cell type chosen by DynamicRNNEstimator.__init__() is the same as
returned by this function when called with the same arguments.
Args:
num_units: A single `int` or a list/tuple of `int`s. The size of the
`RNNCell`s.
cell_type: A string identifying the `RNNCell` type or a subclass of
`RNNCell`.
    dropout_keep_probabilities: a list of dropout probabilities or `None`. If a
      list is given, it must have length `len(num_units) + 1`.
Returns:
An initialized `RNNCell`.
"""
if not isinstance(num_units, (list, tuple)):
num_units = (num_units,)
cells = [_get_single_cell(cell_type, n) for n in num_units]
if dropout_keep_probabilities:
cells = apply_dropout(cells, dropout_keep_probabilities)
if len(cells) == 1:
return cells[0]
return contrib_rnn.MultiRNNCell(cells)
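# Usage sketch (layer sizes and keep probabilities hypothetical): a two-layer
# LSTM stack with dropout applied on the inputs of each layer and the output
# of the last:
#   cell = construct_rnn_cell(num_units=[64, 32], cell_type='lstm',
#                             dropout_keep_probabilities=[1.0, 0.8, 0.9])
# With a single num_units value and no dropout list, the bare cell is returned
# rather than a MultiRNNCell.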
def apply_dropout(cells, dropout_keep_probabilities, random_seed=None):
"""Applies dropout to the outputs and inputs of `cell`.
Args:
cells: A list of `RNNCell`s.
dropout_keep_probabilities: a list whose elements are either floats in
`[0.0, 1.0]` or `None`. It must have length one greater than `cells`.
random_seed: Seed for random dropout.
Returns:
A list of `RNNCell`s, the result of applying the supplied dropouts.
Raises:
ValueError: If `len(dropout_keep_probabilities) != len(cells) + 1`.
"""
if len(dropout_keep_probabilities) != len(cells) + 1:
raise ValueError(
'The number of dropout probabilities must be one greater than the '
'number of cells. Got {} cells and {} dropout probabilities.'.format(
len(cells), len(dropout_keep_probabilities)))
wrapped_cells = [
contrib_rnn.DropoutWrapper(cell, prob, 1.0, seed=random_seed)
for cell, prob in zip(cells[:-1], dropout_keep_probabilities[:-2])
]
wrapped_cells.append(
contrib_rnn.DropoutWrapper(cells[-1], dropout_keep_probabilities[-2],
dropout_keep_probabilities[-1]))
return wrapped_cells
def get_eval_metric_ops(problem_type, prediction_type, sequence_length,
prediction_dict, labels):
"""Returns eval metric ops for given `problem_type` and `prediction_type`.
Args:
problem_type: `ProblemType.CLASSIFICATION` or
`ProblemType.LINEAR_REGRESSION`.
prediction_type: `PredictionType.SINGLE_VALUE` or
`PredictionType.MULTIPLE_VALUE`.
sequence_length: A `Tensor` with shape `[batch_size]` and dtype `int32`
containing the length of each sequence in the batch. If `None`, sequences
are assumed to be unpadded.
prediction_dict: A dict of prediction tensors.
labels: The label `Tensor`.
Returns:
A `dict` mapping strings to the result of calling the metric_fn.
"""
eval_metric_ops = {}
if problem_type == constants.ProblemType.CLASSIFICATION:
# Multi value classification
if prediction_type == PredictionType.MULTIPLE_VALUE:
mask_predictions, mask_labels = mask_activations_and_labels(
prediction_dict[prediction_key.PredictionKey.CLASSES], labels,
sequence_length)
eval_metric_ops['accuracy'] = metrics.streaming_accuracy(
predictions=mask_predictions, labels=mask_labels)
# Single value classification
elif prediction_type == PredictionType.SINGLE_VALUE:
eval_metric_ops['accuracy'] = metrics.streaming_accuracy(
predictions=prediction_dict[prediction_key.PredictionKey.CLASSES],
labels=labels)
elif problem_type == constants.ProblemType.LINEAR_REGRESSION:
# Multi value regression
if prediction_type == PredictionType.MULTIPLE_VALUE:
pass
# Single value regression
elif prediction_type == PredictionType.SINGLE_VALUE:
pass
return eval_metric_ops
def select_last_activations(activations, sequence_lengths):
"""Selects the nth set of activations for each n in `sequence_length`.
Returns a `Tensor` of shape `[batch_size, k]`. If `sequence_length` is not
`None`, then `output[i, :] = activations[i, sequence_length[i] - 1, :]`. If
`sequence_length` is `None`, then `output[i, :] = activations[i, -1, :]`.
Args:
activations: A `Tensor` with shape `[batch_size, padded_length, k]`.
sequence_lengths: A `Tensor` with shape `[batch_size]` or `None`.
Returns:
A `Tensor` of shape `[batch_size, k]`.
"""
with ops.name_scope(
'select_last_activations', values=[activations, sequence_lengths]):
activations_shape = array_ops.shape(activations)
batch_size = activations_shape[0]
padded_length = activations_shape[1]
num_label_columns = activations_shape[2]
if sequence_lengths is None:
sequence_lengths = padded_length
reshaped_activations = array_ops.reshape(activations,
[-1, num_label_columns])
indices = math_ops.range(batch_size) * padded_length + sequence_lengths - 1
last_activations = array_ops.gather(reshaped_activations, indices)
last_activations.set_shape(
[activations.get_shape()[0], activations.get_shape()[2]])
return last_activations
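# Shape example for select_last_activations (values hypothetical): given
# activations of shape [2, 3, 4] and sequence_lengths = [3, 2], the gather
# indices are [2, 4], so the result has shape [2, 4] and holds
# activations[0, 2, :] and activations[1, 1, :], the last valid timestep of
# each sequence.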
def mask_activations_and_labels(activations, labels, sequence_lengths):
"""Remove entries outside `sequence_lengths` and returned flattened results.
Args:
activations: Output of the RNN, shape `[batch_size, padded_length, k]`.
labels: Label values, shape `[batch_size, padded_length]`.
sequence_lengths: A `Tensor` of shape `[batch_size]` with the unpadded
length of each sequence. If `None`, then each sequence is unpadded.
Returns:
activations_masked: `logit` values with those beyond `sequence_lengths`
removed for each batch. Batches are then concatenated. Shape
`[tf.sum(sequence_lengths), k]` if `sequence_lengths` is not `None` and
shape `[batch_size * padded_length, k]` otherwise.
labels_masked: Label values after removing unneeded entries. Shape
`[tf.sum(sequence_lengths)]` if `sequence_lengths` is not `None` and shape
`[batch_size * padded_length]` otherwise.
"""
with ops.name_scope(
'mask_activations_and_labels',
values=[activations, labels, sequence_lengths]):
labels_shape = array_ops.shape(labels)
batch_size = labels_shape[0]
padded_length = labels_shape[1]
if sequence_lengths is None:
flattened_dimension = padded_length * batch_size
activations_masked = array_ops.reshape(activations,
[flattened_dimension, -1])
labels_masked = array_ops.reshape(labels, [flattened_dimension])
else:
mask = array_ops.sequence_mask(sequence_lengths, padded_length)
activations_masked = array_ops.boolean_mask(activations, mask)
labels_masked = array_ops.boolean_mask(labels, mask)
return activations_masked, labels_masked
def multi_value_predictions(activations, target_column, problem_type,
predict_probabilities):
"""Maps `activations` from the RNN to predictions for multi value models.
If `predict_probabilities` is `False`, this function returns a `dict`
containing single entry with key `prediction_key.PredictionKey.CLASSES` for
`problem_type` `ProblemType.CLASSIFICATION` or
  `prediction_key.PredictionKey.SCORES` for `problem_type`
`ProblemType.LINEAR_REGRESSION`.
If `predict_probabilities` is `True`, it will contain a second entry with key
`prediction_key.PredictionKey.PROBABILITIES`. The
value of this entry is a `Tensor` of probabilities with shape
`[batch_size, padded_length, num_classes]`.
Note that variable length inputs will yield some predictions that don't have
meaning. For example, if `sequence_length = [3, 2]`, then prediction `[1, 2]`
has no meaningful interpretation.
Args:
activations: Output from an RNN. Should have dtype `float32` and shape
`[batch_size, padded_length, ?]`.
    target_column: An initialized `TargetColumn`, used to calculate predictions.
problem_type: Either `ProblemType.CLASSIFICATION` or
`ProblemType.LINEAR_REGRESSION`.
predict_probabilities: A Python boolean, indicating whether probabilities
should be returned. Should only be set to `True` for
classification/logistic regression problems.
Returns:
A `dict` mapping strings to `Tensors`.
"""
with ops.name_scope('MultiValuePrediction'):
activations_shape = array_ops.shape(activations)
flattened_activations = array_ops.reshape(activations,
[-1, activations_shape[2]])
prediction_dict = {}
if predict_probabilities:
flat_probabilities = target_column.logits_to_predictions(
flattened_activations, proba=True)
flat_predictions = math_ops.argmax(flat_probabilities, 1)
if target_column.num_label_columns == 1:
probability_shape = array_ops.concat([activations_shape[:2], [2]], 0)
else:
probability_shape = activations_shape
probabilities = array_ops.reshape(
flat_probabilities,
probability_shape,
name=prediction_key.PredictionKey.PROBABILITIES)
prediction_dict[
prediction_key.PredictionKey.PROBABILITIES] = probabilities
else:
flat_predictions = target_column.logits_to_predictions(
flattened_activations, proba=False)
predictions_name = (prediction_key.PredictionKey.CLASSES
if problem_type == constants.ProblemType.CLASSIFICATION
else prediction_key.PredictionKey.SCORES)
predictions = array_ops.reshape(
flat_predictions, [activations_shape[0], activations_shape[1]],
name=predictions_name)
prediction_dict[predictions_name] = predictions
return prediction_dict
|
orangeholic/protobuf
|
refs/heads/master
|
python/google/protobuf/text_encoding.py
|
55
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#PY25 compatible for GAE.
#
"""Encoding related utilities."""
import re
import sys ##PY25
# Lookup table for utf8
_cescape_utf8_to_str = [chr(i) for i in xrange(0, 256)]
_cescape_utf8_to_str[9] = r'\t' # optional escape
_cescape_utf8_to_str[10] = r'\n' # optional escape
_cescape_utf8_to_str[13] = r'\r' # optional escape
_cescape_utf8_to_str[39] = r"\'" # optional escape
_cescape_utf8_to_str[34] = r'\"' # necessary escape
_cescape_utf8_to_str[92] = r'\\' # necessary escape
# Lookup table for non-utf8, with necessary escapes at (o >= 127 or o < 32)
_cescape_byte_to_str = ([r'\%03o' % i for i in xrange(0, 32)] +
[chr(i) for i in xrange(32, 127)] +
[r'\%03o' % i for i in xrange(127, 256)])
_cescape_byte_to_str[9] = r'\t' # optional escape
_cescape_byte_to_str[10] = r'\n' # optional escape
_cescape_byte_to_str[13] = r'\r' # optional escape
_cescape_byte_to_str[39] = r"\'" # optional escape
_cescape_byte_to_str[34] = r'\"' # necessary escape
_cescape_byte_to_str[92] = r'\\' # necessary escape
def CEscape(text, as_utf8):
"""Escape a bytes string for use in an ascii protocol buffer.
text.encode('string_escape') does not seem to satisfy our needs as it
encodes unprintable characters using two-digit hex escapes whereas our
C++ unescaping function allows hex escapes to be any length. So,
"\0011".encode('string_escape') ends up being "\\x011", which will be
decoded in C++ as a single-character string with char code 0x11.
Args:
text: A byte string to be escaped
as_utf8: Specifies if result should be returned in UTF-8 encoding
Returns:
Escaped string
"""
# PY3 hack: make Ord work for str and bytes:
# //platforms/networking/data uses unicode here, hence basestring.
Ord = ord if isinstance(text, basestring) else lambda x: x
if as_utf8:
return ''.join(_cescape_utf8_to_str[Ord(c)] for c in text)
return ''.join(_cescape_byte_to_str[Ord(c)] for c in text)
_CUNESCAPE_HEX = re.compile(r'(\\+)x([0-9a-fA-F])(?![0-9a-fA-F])')
_cescape_highbit_to_str = ([chr(i) for i in range(0, 127)] +
[r'\%03o' % i for i in range(127, 256)])
def CUnescape(text):
"""Unescape a text string with C-style escape sequences to UTF-8 bytes."""
def ReplaceHex(m):
# Only replace the match if the number of leading back slashes is odd. i.e.
# the slash itself is not escaped.
if len(m.group(1)) & 1:
return m.group(1) + 'x0' + m.group(2)
return m.group(0)
# This is required because the 'string_escape' encoding doesn't
# allow single-digit hex escapes (like '\xf').
result = _CUNESCAPE_HEX.sub(ReplaceHex, text)
if sys.version_info[0] < 3: ##PY25
##!PY25 if str is bytes: # PY2
return result.decode('string_escape')
result = ''.join(_cescape_highbit_to_str[ord(c)] for c in result)
return (result.encode('ascii') # Make it bytes to allow decode.
.decode('unicode_escape')
# Make it bytes again to return the proper type.
.encode('raw_unicode_escape'))
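# Usage sketch (inputs hypothetical):
#   CEscape(b'\x00hi"', as_utf8=False)  ->  r'\000hi\"'
#   CUnescape(r'\000hi\"')              ->  b'\x00hi"'
# i.e. the pair round-trips a byte string through the escaping used by the
# ascii protocol buffer text format.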
|
morpheby/levelup-by
|
refs/heads/master
|
common/djangoapps/external_auth/views.py
|
2
|
import functools
import json
import logging
import random
import re
import string # pylint: disable=W0402
import fnmatch
import unicodedata
from textwrap import dedent
from external_auth.models import ExternalAuthMap
from external_auth.djangostore import DjangoOpenIDStore
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME, authenticate, login, logout
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
if settings.MITX_FEATURES.get('AUTH_USE_CAS'):
from django_cas.views import login as django_cas_login
from student.models import UserProfile, TestCenterUser, TestCenterRegistration
from django.http import HttpResponse, HttpResponseRedirect, HttpRequest, HttpResponseForbidden
from django.utils.http import urlquote, is_safe_url
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from mitxmako.shortcuts import render_to_response, render_to_string
try:
from django.views.decorators.csrf import csrf_exempt
except ImportError:
from django.contrib.csrf.middleware import csrf_exempt
from django_future.csrf import ensure_csrf_cookie
import django_openid_auth.views as openid_views
from django_openid_auth import auth as openid_auth
from openid.consumer.consumer import SUCCESS
from openid.server.server import Server, ProtocolError, UntrustedReturnURL
from openid.server.trustroot import TrustRoot
from openid.extensions import ax, sreg
from ratelimitbackend.exceptions import RateLimitException
import student.views
# Required for Pearson
from courseware.views import get_module_for_descriptor, jump_to
from courseware.model_data import FieldDataCache
from xmodule.modulestore.django import modulestore
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore import Location
from xmodule.modulestore.exceptions import ItemNotFoundError
log = logging.getLogger("mitx.external_auth")
AUDIT_LOG = logging.getLogger("audit")
SHIBBOLETH_DOMAIN_PREFIX = 'shib:'
OPENID_DOMAIN_PREFIX = 'openid:'
# -----------------------------------------------------------------------------
# OpenID Common
# -----------------------------------------------------------------------------
@csrf_exempt
def default_render_failure(request,
message,
status=403,
template_name='extauth_failure.html',
exception=None):
"""Render an Openid error page to the user"""
log.debug("In openid_failure " + message)
data = render_to_string(template_name,
dict(message=message, exception=exception))
return HttpResponse(data, status=status)
# -----------------------------------------------------------------------------
# OpenID Authentication
# -----------------------------------------------------------------------------
def generate_password(length=12, chars=string.letters + string.digits):
"""Generate internal password for externally authenticated user"""
choice = random.SystemRandom().choice
return ''.join([choice(chars) for _i in range(length)])
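# Illustration (editor's addition): generate_password() returns a random
# 12-character alphanumeric string such as 'a3F9kQ2pLx0Z' (the value shown
# is made up; SystemRandom is non-deterministic).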
@csrf_exempt
def openid_login_complete(request,
redirect_field_name=REDIRECT_FIELD_NAME,
render_failure=None):
"""Complete the openid login process"""
render_failure = (render_failure or default_render_failure)
openid_response = openid_views.parse_openid_response(request)
if not openid_response:
return render_failure(request,
'This is an OpenID relying party endpoint.')
if openid_response.status == SUCCESS:
external_id = openid_response.identity_url
oid_backend = openid_auth.OpenIDBackend()
details = oid_backend._extract_user_details(openid_response)
log.debug('openid success, details=%s', details)
url = getattr(settings, 'OPENID_SSO_SERVER_URL', None)
external_domain = "{0}{1}".format(OPENID_DOMAIN_PREFIX, url)
fullname = '%s %s' % (details.get('first_name', ''),
details.get('last_name', ''))
return _external_login_or_signup(
request,
external_id,
external_domain,
details,
details.get('email', ''),
fullname
)
return render_failure(request, 'Openid failure')
def _external_login_or_signup(request,
external_id,
external_domain,
credentials,
email,
fullname,
retfun=None):
"""Generic external auth login or signup"""
# see if we have a map from this external_id to an edX username
try:
eamap = ExternalAuthMap.objects.get(external_id=external_id,
external_domain=external_domain)
log.debug('Found eamap=%s', eamap)
except ExternalAuthMap.DoesNotExist:
# go render form for creating edX user
eamap = ExternalAuthMap(external_id=external_id,
external_domain=external_domain,
external_credentials=json.dumps(credentials))
eamap.external_email = email
eamap.external_name = fullname
eamap.internal_password = generate_password()
log.debug('Created eamap=%s', eamap)
eamap.save()
log.info(u"External_Auth login_or_signup for %s : %s : %s : %s", external_domain, external_id, email, fullname)
uses_shibboleth = settings.MITX_FEATURES.get('AUTH_USE_SHIB') and external_domain.startswith(SHIBBOLETH_DOMAIN_PREFIX)
internal_user = eamap.user
if internal_user is None:
if uses_shibboleth:
# If we are using shib, try to link accounts
# For Stanford shib, the email the idp returns is actually under the control of the user.
# Since the id the idps return is not user-editable, and is of the form "username@stanford.edu",
# use the id to link accounts instead.
try:
link_user = User.objects.get(email=eamap.external_id)
if not ExternalAuthMap.objects.filter(user=link_user).exists():
# if there's no pre-existing linked eamap, we link the user
eamap.user = link_user
eamap.save()
internal_user = link_user
log.info('SHIB: Linking existing account for %s', eamap.external_id)
# now pass through to log in
else:
# otherwise, there must have been an error, b/c we've already linked a user with these external
# creds
failure_msg = _(dedent("""
You have already created an account using an external login like WebAuth or Shibboleth.
Please contact %s for support """
% getattr(settings, 'TECH_SUPPORT_EMAIL', 'techsupport@class.stanford.edu')))
return default_render_failure(request, failure_msg)
except User.DoesNotExist:
log.info('SHIB: No user for %s yet, doing signup', eamap.external_email)
return _signup(request, eamap)
else:
log.info('No user for %s yet, doing signup', eamap.external_email)
return _signup(request, eamap)
# We trust shib's authentication, so no need to authenticate using the password again
uname = internal_user.username
if uses_shibboleth:
user = internal_user
# Assuming this 'AUTHENTICATION_BACKENDS' is set in settings, which I think is safe
if settings.AUTHENTICATION_BACKENDS:
auth_backend = settings.AUTHENTICATION_BACKENDS[0]
else:
auth_backend = 'django.contrib.auth.backends.ModelBackend'
user.backend = auth_backend
AUDIT_LOG.info('Linked user "%s" logged in via Shibboleth', user.email)
else:
user = authenticate(username=uname, password=eamap.internal_password, request=request)
if user is None:
# we want to log the failure, but don't want to log the password attempted:
AUDIT_LOG.warning('External Auth Login failed for "%s"', uname)
return _signup(request, eamap)
if not user.is_active:
AUDIT_LOG.warning('User "%s" is not active after external login', uname)
# TODO: improve error page
msg = 'Account not yet activated: please look for link in your email'
return default_render_failure(request, msg)
login(request, user)
request.session.set_expiry(0)
# Now to try enrollment
# Need to special case Shibboleth here because it logs in via a GET.
# testing request.method for extra paranoia
if uses_shibboleth and request.method == 'GET':
enroll_request = _make_shib_enrollment_request(request)
student.views.try_change_enrollment(enroll_request)
else:
student.views.try_change_enrollment(request)
AUDIT_LOG.info("Login success - %s (%s)", user.username, user.email)
if retfun is None:
return redirect('/')
return retfun()
def _flatten_to_ascii(txt):
"""
Flattens possibly unicode txt to ascii (django username limitation)
@param txt: the text to flatten
@return: the flattened txt (in the same type as was originally passed in)
"""
if isinstance(txt, str):
txt = txt.decode('utf-8')
return unicodedata.normalize('NFKD', txt).encode('ASCII', 'ignore')
else:
return unicode(unicodedata.normalize('NFKD', txt).encode('ASCII', 'ignore'))
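# Example (editor's addition): _flatten_to_ascii(u'\xc5ngstr\xf6m') returns
# u'Angstrom' -- NFKD splits off the accents and the ASCII encode drops them.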
@ensure_csrf_cookie
def _signup(request, eamap):
"""
Present form to complete for signup via external authentication.
Even though the user has external credentials, he/she still needs
to create an account on the edX system, and fill in the user
registration form.
eamap is an ExternalAuthMap object, specifying the external user
for which to complete the signup.
"""
# save this for use by student.views.create_account
request.session['ExternalAuthMap'] = eamap
# by default, conjoin the name with no spaces, flattened to ascii b/c django can't handle unicode usernames, sadly
# but this only affects username, not fullname
username = re.sub(r'\s', '', _flatten_to_ascii(eamap.external_name), flags=re.UNICODE)
context = {'has_extauth_info': True,
'show_signup_immediately': True,
'extauth_domain': eamap.external_domain,
'extauth_id': eamap.external_id,
'extauth_email': eamap.external_email,
'extauth_username': username,
'extauth_name': eamap.external_name,
'ask_for_tos': True,
}
# Some openEdX instances can't have terms of service for shib users,
# e.g. according to Stanford's Office of General Counsel
uses_shibboleth = (settings.MITX_FEATURES.get('AUTH_USE_SHIB') and
eamap.external_domain.startswith(SHIBBOLETH_DOMAIN_PREFIX))
if uses_shibboleth and settings.MITX_FEATURES.get('SHIB_DISABLE_TOS'):
context['ask_for_tos'] = False
# detect if full name is blank and ask for it from user
context['ask_for_fullname'] = eamap.external_name.strip() == ''
# validate provided mail and if it's not valid ask the user
try:
validate_email(eamap.external_email)
context['ask_for_email'] = False
except ValidationError:
context['ask_for_email'] = True
log.info('EXTAUTH: Doing signup for %s', eamap.external_id)
return student.views.register_user(request, extra_context=context)
# -----------------------------------------------------------------------------
# MIT SSL
# -----------------------------------------------------------------------------
def _ssl_dn_extract_info(dn_string):
"""
Extract username, email address (may be anyuser@anydomain.com) and
full name from the SSL DN string. Return (user,email,fullname) if
successful, and None otherwise.
"""
ss = re.search('/emailAddress=(.*)@([^/]+)', dn_string)
if ss:
user = ss.group(1)
email = "%s@%s" % (user, ss.group(2))
else:
return None
ss = re.search('/CN=([^/]+)/', dn_string)
if ss:
fullname = ss.group(1)
else:
return None
return (user, email, fullname)
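# Example (editor's addition; the DN is made up):
# _ssl_dn_extract_info('/C=US/O=MIT/CN=Jane Doe/emailAddress=jdoe@MIT.EDU')
# returns ('jdoe', 'jdoe@MIT.EDU', 'Jane Doe'). Note the /CN=... pattern
# needs a trailing '/', i.e. another field after CN, as above.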
def _ssl_get_cert_from_request(request):
"""
Extract the SSL client certificate (DN string) from the request, if it
exists; return '' when no certificate information is found.
"""
certkey = "SSL_CLIENT_S_DN" # specify the request.META field to use
cert = request.META.get(certkey, '')
if not cert:
cert = request.META.get('HTTP_' + certkey, '')
if not cert:
try:
# try the direct apache2 SSL key
cert = request._req.subprocess_env.get(certkey, '')
except Exception:
return ''
return cert
def ssl_login_shortcut(fn):
"""
Python function decorator for login procedures, to allow direct login
based on existing ExternalAuth record and MIT ssl certificate.
"""
def wrapped(*args, **kwargs):
if not settings.MITX_FEATURES['AUTH_USE_MIT_CERTIFICATES']:
return fn(*args, **kwargs)
request = args[0]
cert = _ssl_get_cert_from_request(request)
if not cert: # no certificate information - show normal login window
return fn(*args, **kwargs)
(_user, email, fullname) = _ssl_dn_extract_info(cert)
return _external_login_or_signup(
request,
external_id=email,
external_domain="ssl:MIT",
credentials=cert,
email=email,
fullname=fullname
)
return wrapped
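# Usage sketch (editor's addition; the view name is hypothetical):
# @ssl_login_shortcut
# def signin_page(request):
#     ...
# With AUTH_USE_MIT_CERTIFICATES enabled and a client cert present, the
# wrapped view is bypassed and the user is logged in (or signed up) directly.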
@csrf_exempt
def ssl_login(request):
"""
This is called by branding.views.index when
MITX_FEATURES['AUTH_USE_MIT_CERTIFICATES'] = True
Used for MIT user authentication. This presumes the web server
(nginx) has been configured to require specific client
certificates.
If the incoming protocol is HTTPS (SSL) then authenticate via
client certificate. The certificate provides user email and
fullname; this populates the ExternalAuthMap. The user is
nevertheless still asked to complete the edX signup.
Else continues on with student.views.index, and no authentication.
"""
# Just to make sure we're calling this only at MIT:
if not settings.MITX_FEATURES['AUTH_USE_MIT_CERTIFICATES']:
return HttpResponseForbidden()
cert = _ssl_get_cert_from_request(request)
if not cert:
# no certificate information - go onward to main index
return student.views.index(request)
(_user, email, fullname) = _ssl_dn_extract_info(cert)
retfun = functools.partial(student.views.index, request)
return _external_login_or_signup(
request,
external_id=email,
external_domain="ssl:MIT",
credentials=cert,
email=email,
fullname=fullname,
retfun=retfun
)
# -----------------------------------------------------------------------------
# CAS (Central Authentication Service)
# -----------------------------------------------------------------------------
def cas_login(request, next_page=None, required=False):
"""
Uses django_cas for authentication.
CAS is a common authentication method pioneered by Yale.
See http://en.wikipedia.org/wiki/Central_Authentication_Service
Does the normal CAS login, then generates a user_profile if one does
not yet exist and the login was successful. We assume that user details are
maintained by the central service, and thus an empty user profile
is appropriate.
"""
ret = django_cas_login(request, next_page, required)
if request.user.is_authenticated():
user = request.user
if not UserProfile.objects.filter(user=user):
user_profile = UserProfile(name=user.username, user=user)
user_profile.save()
return ret
# -----------------------------------------------------------------------------
# Shibboleth (Stanford and others. Uses *Apache* environment variables)
# -----------------------------------------------------------------------------
def shib_login(request):
"""
Uses Apache's REMOTE_USER environment variable as the external id.
This in turn typically uses EduPersonPrincipalName
http://www.incommonfederation.org/attributesummary.html#eduPersonPrincipal
but the configuration is in the shibboleth software.
"""
shib_error_msg = _(dedent(
"""
Your university identity server did not return your ID information to us.
Please try logging in again. (You may need to restart your browser.)
"""))
if not request.META.get('REMOTE_USER'):
log.error("SHIB: no REMOTE_USER found in request.META")
return default_render_failure(request, shib_error_msg)
elif not request.META.get('Shib-Identity-Provider'):
log.error("SHIB: no Shib-Identity-Provider in request.META")
return default_render_failure(request, shib_error_msg)
else:
# If we get here, the user has authenticated properly
shib = {attr: request.META.get(attr, '')
for attr in ['REMOTE_USER', 'givenName', 'sn', 'mail', 'Shib-Identity-Provider', 'displayName']}
# Clean up first name, last name, and email address
# TODO: Make this less hardcoded re: format, but split will work
# even if ";" is not present, since we are accessing 1st element
shib['sn'] = shib['sn'].split(";")[0].strip().capitalize()
shib['givenName'] = shib['givenName'].split(";")[0].strip().capitalize()
# TODO: should we be logging creds here, at info level?
log.info("SHIB creds returned: %r", shib)
fullname = shib['displayName'] if shib['displayName'] else u'%s %s' % (shib['givenName'], shib['sn'])
redirect_to = request.REQUEST.get('next')
retfun = None
if redirect_to:
retfun = functools.partial(_safe_postlogin_redirect, redirect_to, request.get_host())
return _external_login_or_signup(
request,
external_id=shib['REMOTE_USER'],
external_domain=SHIBBOLETH_DOMAIN_PREFIX + shib['Shib-Identity-Provider'],
credentials=shib,
email=shib['mail'],
fullname=fullname,
retfun=retfun
)
def _safe_postlogin_redirect(redirect_to, safehost, default_redirect='/'):
"""
If redirect_to param is safe (not off this host), then perform the redirect.
Otherwise just redirect to '/'.
Basically copied from django.contrib.auth.views.login
@param redirect_to: user-supplied redirect url
@param safehost: which host is safe to redirect to
@return: an HttpResponseRedirect
"""
if is_safe_url(url=redirect_to, host=safehost):
return redirect(redirect_to)
return redirect(default_redirect)
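# Example (editor's addition): _safe_postlogin_redirect('/dashboard',
# 'edx.example.org') redirects to '/dashboard', whereas a cross-host url
# such as 'http://evil.example.com/' falls back to the default '/'.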
def _make_shib_enrollment_request(request):
"""
Need this hack function because shibboleth logins don't happen over POST
but change_enrollment expects its request to be a POST, with
enrollment_action and course_id POST parameters.
"""
enroll_request = HttpRequest()
enroll_request.user = request.user
enroll_request.session = request.session
enroll_request.method = "POST"
# copy() also makes GET and POST mutable
# See https://docs.djangoproject.com/en/dev/ref/request-response/#django.http.QueryDict.update
enroll_request.GET = request.GET.copy()
enroll_request.POST = request.POST.copy()
# also have to copy these GET parameters over to POST
if "enrollment_action" not in enroll_request.POST and "enrollment_action" in enroll_request.GET:
enroll_request.POST.setdefault('enrollment_action', enroll_request.GET.get('enrollment_action'))
if "course_id" not in enroll_request.POST and "course_id" in enroll_request.GET:
enroll_request.POST.setdefault('course_id', enroll_request.GET.get('course_id'))
return enroll_request
def course_specific_login(request, course_id):
"""
Dispatcher function for selecting the specific login method
required by the course
"""
try:
course = course_from_id(course_id)
except ItemNotFoundError:
# couldn't find the course, will just return vanilla signin page
return _redirect_with_get_querydict('signin_user', request.GET)
# now the dispatching conditionals. Only shib for now
if settings.MITX_FEATURES.get('AUTH_USE_SHIB') and course.enrollment_domain.startswith(SHIBBOLETH_DOMAIN_PREFIX):
return _redirect_with_get_querydict('shib-login', request.GET)
# Default fallthrough to normal signin page
return _redirect_with_get_querydict('signin_user', request.GET)
def course_specific_register(request, course_id):
"""
Dispatcher function for selecting the specific registration method
required by the course
"""
try:
course = course_from_id(course_id)
except ItemNotFoundError:
# couldn't find the course, will just return vanilla registration page
return _redirect_with_get_querydict('register_user', request.GET)
# now the dispatching conditionals. Only shib for now
if settings.MITX_FEATURES.get('AUTH_USE_SHIB') and course.enrollment_domain.startswith(SHIBBOLETH_DOMAIN_PREFIX):
# shib-login takes care of both registration and login flows
return _redirect_with_get_querydict('shib-login', request.GET)
# Default fallthrough to normal registration page
return _redirect_with_get_querydict('register_user', request.GET)
def _redirect_with_get_querydict(view_name, get_querydict):
"""
Helper function to carry over get parameters across redirects
Using urlencode(safe='/') because the @login_required decorator generates 'next' queryparams with '/' unencoded
"""
if get_querydict:
return redirect("%s?%s" % (reverse(view_name), get_querydict.urlencode(safe='/')))
return redirect(view_name)
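# Example (editor's addition): with request.GET = {'next': '/courses/x'},
# _redirect_with_get_querydict('signin_user', request.GET) redirects to
# reverse('signin_user') + '?next=/courses/x', slashes left unencoded.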
# -----------------------------------------------------------------------------
# OpenID Provider
# -----------------------------------------------------------------------------
def get_xrds_url(resource, request):
"""
Return the XRDS url for a resource
"""
host = request.get_host()
location = host + '/openid/provider/' + resource + '/'
if request.is_secure():
return 'https://' + location
else:
return 'http://' + location
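# Example (editor's addition): for a secure request to host
# 'edx.example.org', get_xrds_url('login', request) returns
# 'https://edx.example.org/openid/provider/login/'.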
def add_openid_simple_registration(request, response, data):
sreg_data = {}
sreg_request = sreg.SRegRequest.fromOpenIDRequest(request)
sreg_fields = sreg_request.allRequestedFields()
# if consumer requested simple registration fields, add them
if sreg_fields:
for field in sreg_fields:
if field == 'email' and 'email' in data:
sreg_data['email'] = data['email']
elif field == 'fullname' and 'fullname' in data:
sreg_data['fullname'] = data['fullname']
elif field == 'nickname' and 'nickname' in data:
sreg_data['nickname'] = data['nickname']
# construct sreg response
sreg_response = sreg.SRegResponse.extractResponse(sreg_request,
sreg_data)
sreg_response.toMessage(response.fields)
def add_openid_attribute_exchange(request, response, data):
try:
ax_request = ax.FetchRequest.fromOpenIDRequest(request)
except ax.AXError:
# not using OpenID attribute exchange extension
pass
else:
ax_response = ax.FetchResponse()
# if consumer requested attribute exchange fields, add them
if ax_request and ax_request.requested_attributes:
for type_uri in ax_request.requested_attributes.iterkeys():
email_schema = 'http://axschema.org/contact/email'
name_schema = 'http://axschema.org/namePerson'
if type_uri == email_schema and 'email' in data:
ax_response.addValue(email_schema, data['email'])
elif type_uri == name_schema and 'fullname' in data:
ax_response.addValue(name_schema, data['fullname'])
# construct ax response
ax_response.toMessage(response.fields)
def provider_respond(server, request, response, data):
"""
Respond to an OpenID request
"""
# get and add extensions
add_openid_simple_registration(request, response, data)
add_openid_attribute_exchange(request, response, data)
# create http response from OpenID response
webresponse = server.encodeResponse(response)
http_response = HttpResponse(webresponse.body)
http_response.status_code = webresponse.code
# add OpenID headers to response
for k, v in webresponse.headers.iteritems():
http_response[k] = v
return http_response
def validate_trust_root(openid_request):
"""
Only allow OpenID requests from valid trust roots
"""
trusted_roots = getattr(settings, 'OPENID_PROVIDER_TRUSTED_ROOT', None)
if not trusted_roots:
# not using trusted roots
return True
# don't allow empty trust roots
if (not hasattr(openid_request, 'trust_root') or
not openid_request.trust_root):
log.error('no trust_root')
return False
# ensure trust root parses cleanly (one wildcard, of form *.foo.com, etc.)
trust_root = TrustRoot.parse(openid_request.trust_root)
if not trust_root:
log.error('invalid trust_root')
return False
# don't allow empty return tos
if (not hasattr(openid_request, 'return_to') or
not openid_request.return_to):
log.error('empty return_to')
return False
# ensure return to is within trust root
if not trust_root.validateURL(openid_request.return_to):
log.error('invalid return_to')
return False
# check that the root matches the ones we trust
if not any(r for r in trusted_roots if fnmatch.fnmatch(trust_root, r)):
log.error('non-trusted root')
return False
return True
@csrf_exempt
def provider_login(request):
"""
OpenID login endpoint
"""
# make and validate endpoint
endpoint = get_xrds_url('login', request)
if not endpoint:
return default_render_failure(request, "Invalid OpenID request")
# initialize store and server
store = DjangoOpenIDStore()
server = Server(store, endpoint)
# first check to see if the request is an OpenID request.
# If so, the client will have specified an 'openid.mode' as part
# of the request.
querydict = dict(request.REQUEST.items())
error = False
if 'openid.mode' in request.GET or 'openid.mode' in request.POST:
# decode request
try:
openid_request = server.decodeRequest(querydict)
except (UntrustedReturnURL, ProtocolError):
openid_request = None
if not openid_request:
return default_render_failure(request, "Invalid OpenID request")
# don't allow invalid and non-trusted trust roots
if not validate_trust_root(openid_request):
return default_render_failure(request, "Invalid OpenID trust root")
# checkid_immediate not supported, require user interaction
if openid_request.mode == 'checkid_immediate':
return provider_respond(server, openid_request,
openid_request.answer(False), {})
# checkid_setup, so display login page
# (by falling through to the provider_login at the
# bottom of this method).
elif openid_request.mode == 'checkid_setup':
if openid_request.idSelect():
# remember request and original path
request.session['openid_setup'] = {
'request': openid_request,
'url': request.get_full_path()
}
# user failed login on previous attempt
if 'openid_error' in request.session:
error = True
del request.session['openid_error']
# OpenID response
else:
return provider_respond(server, openid_request,
server.handleRequest(openid_request), {})
# handle login redirection: these are also sent to this view function,
# but are distinguished by lacking the openid mode. We also know that
# they are posts, because they come from the popup
elif request.method == 'POST' and 'openid_setup' in request.session:
# get OpenID request from session
openid_setup = request.session['openid_setup']
openid_request = openid_setup['request']
openid_request_url = openid_setup['url']
del request.session['openid_setup']
# don't allow invalid trust roots
if not validate_trust_root(openid_request):
return default_render_failure(request, "Invalid OpenID trust root")
# check if user with given email exists
# Failure is redirected to this method (by using the original URL),
# which will bring up the login dialog.
email = request.POST.get('email', None)
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
request.session['openid_error'] = True
msg = "OpenID login failed - Unknown user email: %s"
AUDIT_LOG.warning(msg, email)
return HttpResponseRedirect(openid_request_url)
# attempt to authenticate user (but not actually log them in...)
# Failure is again redirected to the login dialog.
username = user.username
password = request.POST.get('password', None)
try:
user = authenticate(username=username, password=password, request=request)
except RateLimitException:
AUDIT_LOG.warning('OpenID - Too many failed login attempts.')
return HttpResponseRedirect(openid_request_url)
if user is None:
request.session['openid_error'] = True
msg = "OpenID login failed - password for %s is invalid"
AUDIT_LOG.warning(msg, email)
return HttpResponseRedirect(openid_request_url)
# authentication succeeded, so fetch user information
# that was requested
if user is not None and user.is_active:
# remove error from session since login succeeded
if 'openid_error' in request.session:
del request.session['openid_error']
AUDIT_LOG.info("OpenID login success - %s (%s)",
user.username, user.email)
# redirect user to return_to location
url = endpoint + urlquote(user.username)
response = openid_request.answer(True, None, url)
# TODO: for CS50 we are forcibly returning the username
# instead of fullname. In the OpenID simple registration
# extension, we don't have to return any fields we don't
# want to, even if they were marked as required by the
# Consumer. The behavior of what to do when there are
# missing fields is up to the Consumer. The proper change
# should only return the username, however this will likely
# break the CS50 client. Temporarily we will be returning
# username filling in for fullname in addition to username
# as sreg nickname.
# Note too that this is hardcoded, and not really responding to
# the extensions that were registered in the first place.
results = {
'nickname': user.username,
'email': user.email,
'fullname': user.username
}
# the request succeeded:
return provider_respond(server, openid_request, response, results)
# the account is not active, so redirect back to the login page:
request.session['openid_error'] = True
msg = "Login failed - Account not active for user %s"
AUDIT_LOG.warning(msg, username)
return HttpResponseRedirect(openid_request_url)
# determine consumer domain if applicable
return_to = ''
if 'openid.return_to' in request.REQUEST:
return_to = request.REQUEST['openid.return_to']
matches = re.match(r'\w+:\/\/([\w\.-]+)', return_to)
# guard against a malformed openid.return_to value
if matches:
return_to = matches.group(1)
# display login page
response = render_to_response('provider_login.html', {
'error': error,
'return_to': return_to
})
# add custom XRDS header necessary for discovery process
response['X-XRDS-Location'] = get_xrds_url('xrds', request)
return response
def provider_identity(request):
"""
XRDS for identity discovery
"""
response = render_to_response('identity.xml',
{'url': get_xrds_url('login', request)},
mimetype='text/xml')
# custom XRDS header necessary for discovery process
response['X-XRDS-Location'] = get_xrds_url('identity', request)
return response
def provider_xrds(request):
"""
XRDS for endpoint discovery
"""
response = render_to_response('xrds.xml',
{'url': get_xrds_url('login', request)},
mimetype='text/xml')
# custom XRDS header necessary for discovery process
response['X-XRDS-Location'] = get_xrds_url('xrds', request)
return response
#-------------------
# Pearson
#-------------------
def course_from_id(course_id):
"""Return the CourseDescriptor corresponding to this course_id"""
course_loc = CourseDescriptor.id_to_location(course_id)
return modulestore().get_instance(course_id, course_loc)
@csrf_exempt
def test_center_login(request):
''' Log in students taking exams via Pearson
Takes a POST request that contains the following keys:
- code - a security code provided by Pearson
- clientCandidateID
- registrationID
- exitURL - the url that we redirect to once we're done
- vueExamSeriesCode - a code that indicates the exam that we're using
'''
# errors are returned by navigating to the error_url, adding a query parameter named "code"
# which contains the error code describing the exceptional condition.
def makeErrorURL(error_url, error_code):
log.error("generating error URL with error code {}".format(error_code))
return "{}?code={}".format(error_url, error_code)
# get provided error URL, which will be used as a known prefix for returning error messages to the
# Pearson shell.
error_url = request.POST.get("errorURL")
# TODO: check that the parameters have not been tampered with, by comparing the code provided by Pearson
# with the code we calculate for the same parameters.
if 'code' not in request.POST:
return HttpResponseRedirect(makeErrorURL(error_url, "missingSecurityCode"))
code = request.POST.get("code")
# calculate SHA for query string
# TODO: figure out how to get the original query string, so we can hash it and compare.
if 'clientCandidateID' not in request.POST:
return HttpResponseRedirect(makeErrorURL(error_url, "missingClientCandidateID"))
client_candidate_id = request.POST.get("clientCandidateID")
# TODO: check remaining parameters, and maybe at least log if they're not matching
# expected values....
# registration_id = request.POST.get("registrationID")
# exit_url = request.POST.get("exitURL")
# find testcenter_user that matches the provided ID:
try:
testcenteruser = TestCenterUser.objects.get(client_candidate_id=client_candidate_id)
except TestCenterUser.DoesNotExist:
AUDIT_LOG.error("not able to find demographics for cand ID {}".format(client_candidate_id))
return HttpResponseRedirect(makeErrorURL(error_url, "invalidClientCandidateID"))
AUDIT_LOG.info("Attempting to log in test-center user '{}' for test of cand {}".format(testcenteruser.user.username, client_candidate_id))
# find testcenter_registration that matches the provided exam code:
# Note that we could rely in future on either the registrationId or the exam code,
# or possibly both. But for now we know what to do with an ExamSeriesCode,
# while we currently have no record of RegistrationID values at all.
if 'vueExamSeriesCode' not in request.POST:
# we are not allowed to make up a new error code, according to Pearson,
# so instead of "missingExamSeriesCode", we use a valid one that is
# inaccurate but at least distinct. (Sigh.)
AUDIT_LOG.error("missing exam series code for cand ID {}".format(client_candidate_id))
return HttpResponseRedirect(makeErrorURL(error_url, "missingPartnerID"))
exam_series_code = request.POST.get('vueExamSeriesCode')
registrations = TestCenterRegistration.objects.filter(testcenter_user=testcenteruser, exam_series_code=exam_series_code)
if not registrations:
AUDIT_LOG.error("not able to find exam registration for exam {} and cand ID {}".format(exam_series_code, client_candidate_id))
return HttpResponseRedirect(makeErrorURL(error_url, "noTestsAssigned"))
# TODO: figure out what to do if there is more than one registration....
# for now, just take the first...
registration = registrations[0]
course_id = registration.course_id
course = course_from_id(course_id) # assume it will be found....
if not course:
AUDIT_LOG.error("not able to find course from ID {} for cand ID {}".format(course_id, client_candidate_id))
return HttpResponseRedirect(makeErrorURL(error_url, "incorrectCandidateTests"))
exam = course.get_test_center_exam(exam_series_code)
if not exam:
AUDIT_LOG.error("not able to find exam {} for course ID {} and cand ID {}".format(exam_series_code, course_id, client_candidate_id))
return HttpResponseRedirect(makeErrorURL(error_url, "incorrectCandidateTests"))
location = exam.exam_url
log.info("Proceeding with test of cand {} on exam {} for course {}: URL = {}".format(client_candidate_id, exam_series_code, course_id, location))
# check if the test has already been taken
timelimit_descriptor = modulestore().get_instance(course_id, Location(location))
if not timelimit_descriptor:
log.error("cand {} on exam {} for course {}: descriptor not found for location {}".format(client_candidate_id, exam_series_code, course_id, location))
return HttpResponseRedirect(makeErrorURL(error_url, "missingClientProgram"))
timelimit_module_cache = FieldDataCache.cache_for_descriptor_descendents(course_id, testcenteruser.user,
timelimit_descriptor, depth=None)
timelimit_module = get_module_for_descriptor(request.user, request, timelimit_descriptor,
timelimit_module_cache, course_id, position=None)
if not timelimit_module.category == 'timelimit':
log.error("cand {} on exam {} for course {}: non-timelimit module at location {}".format(client_candidate_id, exam_series_code, course_id, location))
return HttpResponseRedirect(makeErrorURL(error_url, "missingClientProgram"))
if timelimit_module and timelimit_module.has_ended:
AUDIT_LOG.warning("cand {} on exam {} for course {}: test already over at {}".format(client_candidate_id, exam_series_code, course_id, timelimit_module.ending_at))
return HttpResponseRedirect(makeErrorURL(error_url, "allTestsTaken"))
# check if we need to provide an accommodation:
time_accommodation_mapping = {'ET12ET': 'ADDHALFTIME',
'ET30MN': 'ADD30MIN',
'ETDBTM': 'ADDDOUBLE', }
time_accommodation_code = None
for code in registration.get_accommodation_codes():
if code in time_accommodation_mapping:
time_accommodation_code = time_accommodation_mapping[code]
if time_accommodation_code:
timelimit_module.accommodation_code = time_accommodation_code
AUDIT_LOG.info("cand {} on exam {} for course {}: receiving accommodation {}".format(client_candidate_id, exam_series_code, course_id, time_accommodation_code))
# UGLY HACK!!!
# Login assumes that authentication has occurred, and that there is a
# backend annotation on the user object, indicating which backend
# against which the user was authenticated. We're authenticating here
# against the registration entry, and assuming that the request given
# this information is correct, we allow the user to be logged in
# without a password. This could all be formalized in a backend object
# that does the above checking.
# TODO: (brian) create a backend class to do this.
# testcenteruser.user.backend = "%s.%s" % (backend.__module__, backend.__class__.__name__)
testcenteruser.user.backend = "%s.%s" % ("TestcenterAuthenticationModule", "TestcenterAuthenticationClass")
login(request, testcenteruser.user)
AUDIT_LOG.info("Logged in user '{}' for test of cand {} on exam {} for course {}: URL = {}".format(testcenteruser.user.username, client_candidate_id, exam_series_code, course_id, location))
# And start the test:
return jump_to(request, course_id, location)
|
ftomassetti/intellij-community
|
refs/heads/master
|
python/testData/hierarchy/call/Static/Lambda/file_1.py
|
80
|
def func1(): pass
def func2(): pass
def func3(): pass
def func4(): pass
def func5(): pass
def func6(): pass
def func7(): pass
def func8(): pass
def func9(): pass
def func10(): pass
def func11(): pass
def func12(): pass
def func13(): pass
def func14(): pass
def func15(): pass
def func16(): pass
def func17(): pass
def func18(): pass
|
rimbalinux/LMD3
|
refs/heads/master
|
django/db/models/sql/constants.py
|
13
|
import re
# Valid query types (a dictionary is used for speedy lookups).
QUERY_TERMS = dict([(x, None) for x in (
'exact', 'iexact', 'contains', 'icontains', 'gt', 'gte', 'lt', 'lte', 'in',
'startswith', 'istartswith', 'endswith', 'iendswith', 'range', 'year',
'month', 'day', 'week_day', 'isnull', 'search', 'regex', 'iregex',
)])
# Size of each "chunk" for get_iterator calls.
# Larger values are slightly faster at the expense of more storage space.
GET_ITERATOR_CHUNK_SIZE = 100
# Separator used to split filter strings apart.
LOOKUP_SEP = '__'
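# Example (editor's addition): a filter key such as 'author__name__iexact'
# splits on LOOKUP_SEP into ['author', 'name', 'iexact'], where the final
# piece must be one of the QUERY_TERMS above.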
# Constants to make looking up tuple values clearer.
# Join lists (indexes into the tuples that are values in the alias_map
# dictionary in the Query class).
TABLE_NAME = 0
RHS_ALIAS = 1
JOIN_TYPE = 2
LHS_ALIAS = 3
LHS_JOIN_COL = 4
RHS_JOIN_COL = 5
NULLABLE = 6
# How many results to expect from a cursor.execute call
MULTI = 'multi'
SINGLE = 'single'
ORDER_PATTERN = re.compile(r'\?|[-+]?[.\w]+$')
ORDER_DIR = {
'ASC': ('ASC', 'DESC'),
'DESC': ('DESC', 'ASC')}
|
hkariti/ansible
|
refs/heads/devel
|
lib/ansible/plugins/netconf/junos.py
|
16
|
#
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import re
from ansible import constants as C
from ansible.module_utils._text import to_text, to_bytes
from ansible.errors import AnsibleConnectionFailure, AnsibleError
from ansible.plugins.netconf import NetconfBase
from ansible.plugins.netconf import ensure_connected
try:
from ncclient import manager
from ncclient.operations import RPCError
from ncclient.transport.errors import SSHUnknownHostError
from ncclient.xml_ import to_ele, to_xml, new_ele
except ImportError:
raise AnsibleError("ncclient is not installed")
class Netconf(NetconfBase):
def get_text(self, ele, tag):
try:
return to_text(ele.find(tag).text, errors='surrogate_then_replace').strip()
except AttributeError:
pass
def get_device_info(self):
device_info = dict()
device_info['network_os'] = 'junos'
ele = new_ele('get-software-information')
data = self.execute_rpc(to_xml(ele))
reply = to_ele(data)
sw_info = reply.find('.//software-information')
device_info['network_os_version'] = self.get_text(sw_info, 'junos-version')
device_info['network_os_hostname'] = self.get_text(sw_info, 'host-name')
device_info['network_os_model'] = self.get_text(sw_info, 'product-model')
return device_info
@ensure_connected
def execute_rpc(self, name):
"""RPC to be execute on remote device
:name: Name of rpc in string format"""
return self.rpc(name)
@ensure_connected
def load_configuration(self, *args, **kwargs):
"""Loads given configuration on device
:format: Format of configuration (xml, text, set)
:action: Action to be performed (merge, replace, override, update)
:target: is the name of the configuration datastore being edited
:config: is the configuration in string format."""
if kwargs.get('config'):
kwargs['config'] = to_bytes(kwargs['config'], errors='surrogate_or_strict')
if kwargs.get('format', 'xml') == 'xml':
kwargs['config'] = to_ele(kwargs['config'])
try:
return self.m.load_configuration(*args, **kwargs).data_xml
except RPCError as exc:
raise Exception(to_xml(exc.xml))
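# Usage sketch (editor's addition; 'conn' stands for a connected Netconf
# plugin instance, and the XML payload is made up):
# conn.load_configuration(action='merge',
#     config='<configuration><system><host-name>r1</host-name></system></configuration>')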
def get_capabilities(self):
result = dict()
result['rpc'] = self.get_base_rpc() + ['commit', 'discard_changes', 'validate', 'lock', 'unlock', 'copy_config',
'execute_rpc', 'load_configuration', 'get_configuration', 'command',
'reboot', 'halt']
result['network_api'] = 'netconf'
result['device_info'] = self.get_device_info()
result['server_capabilities'] = [c for c in self.m.server_capabilities]
result['client_capabilities'] = [c for c in self.m.client_capabilities]
result['session_id'] = self.m.session_id
return json.dumps(result)
@staticmethod
def guess_network_os(obj):
try:
m = manager.connect(
host=obj._play_context.remote_addr,
port=obj._play_context.port or 830,
username=obj._play_context.remote_user,
password=obj._play_context.password,
key_filename=obj._play_context.private_key_file,
hostkey_verify=C.HOST_KEY_CHECKING,
look_for_keys=C.PARAMIKO_LOOK_FOR_KEYS,
allow_agent=obj._play_context.allow_agent,
timeout=obj._play_context.timeout
)
except SSHUnknownHostError as exc:
raise AnsibleConnectionFailure(str(exc))
guessed_os = None
for c in m.server_capabilities:
if re.search('junos', c):
guessed_os = 'junos'
m.close_session()
return guessed_os
@ensure_connected
def get_configuration(self, *args, **kwargs):
"""Retrieve all or part of a specified configuration.
:format: format in which the configuration should be retrieved
:filter: specifies the portion of the configuration to retrieve
(by default entire configuration is retrieved)"""
return self.m.get_configuration(*args, **kwargs).data_xml
@ensure_connected
def compare_configuration(self, *args, **kwargs):
"""Compare configuration
:rollback: rollback id"""
return self.m.compare_configuration(*args, **kwargs).data_xml
@ensure_connected
def halt(self):
"""halt the device"""
return self.m.halt().data_xml
@ensure_connected
def reboot(self):
"""reboot the device"""
return self.m.reboot().data_xml
@ensure_connected
def get(self, *args, **kwargs):
try:
return self.m.get(*args, **kwargs).data_xml
except RPCError as exc:
raise Exception(to_xml(exc.xml))
@ensure_connected
def get_config(self, *args, **kwargs):
try:
return self.m.get_config(*args, **kwargs).data_xml
except RPCError as exc:
raise Exception(to_xml(exc.xml))
@ensure_connected
def edit_config(self, *args, **kwargs):
try:
return self.m.edit_config(*args, **kwargs).data_xml
except RPCError as exc:
raise Exception(to_xml(exc.xml))
@ensure_connected
def commit(self, *args, **kwargs):
try:
return self.m.commit(*args, **kwargs).data_xml
except RPCError as exc:
raise Exception(to_xml(exc.xml))
@ensure_connected
def validate(self, *args, **kwargs):
return self.m.validate(*args, **kwargs).data_xml
@ensure_connected
def discard_changes(self, *args, **kwargs):
return self.m.discard_changes(*args, **kwargs).data_xml
|
Sorosliu1029/Operating_System
|
refs/heads/master
|
related_info/lab8/disksim-homework.py
|
100
|
#! /usr/bin/env python
from Tkinter import *
from types import *
import math, random, time, sys, os
from optparse import OptionParser
from decimal import *
MAXTRACKS = 1000
# states that a request/disk go through
STATE_NULL = 0
STATE_SEEK = 1
STATE_ROTATE = 2
STATE_XFER = 3
STATE_DONE = 4
#
# TODO
# XXX transfer time
# XXX satf
# XXX skew
# XXX scheduling window
# XXX sstf
# XXX specify requests vs. random requests in range
# XXX add new requests as old ones complete (starvation)
# XXX run in non-graphical mode
# XXX better graphical display (show key, long lists of requests, more timings on screen)
# XXX be able to do "pure" sequential
# XXX add more blocks around outer tracks (zoning)
# XXX simple flag to make scheduling window a fairness window (-F)
# new algs to scan and c-scan the disk?
#
class Disk:
def __init__(self, addr, addrDesc, lateAddr, lateAddrDesc,
policy, seekSpeed, rotateSpeed, skew, window, compute,
graphics, zoning):
self.addr = addr
self.addrDesc = addrDesc
self.lateAddr = lateAddr
self.lateAddrDesc = lateAddrDesc
self.policy = policy
self.seekSpeed = Decimal(seekSpeed)
self.rotateSpeed = Decimal(rotateSpeed)
self.skew = skew
self.window = window
self.compute = compute
self.graphics = graphics
self.zoning = zoning
# figure out zones first, to figure out the max possible request
self.InitBlockLayout()
# figure out requests
random.seed(options.seed)
self.requests = self.MakeRequests(self.addr, self.addrDesc)
self.lateRequests = self.MakeRequests(self.lateAddr, self.lateAddrDesc)
# graphical startup
self.width = 500
if self.graphics:
self.root = Tk()
tmpLen = len(self.requests)
if len(self.lateRequests) > 0:
tmpLen += len(self.lateRequests)
self.canvas = Canvas(self.root, width=410, height=460 + ((tmpLen / 20) * 20))
self.canvas.pack()
# fairness stuff
if self.policy == 'BSATF' and self.window != -1:
self.fairWindow = self.window
else:
self.fairWindow = -1
print 'REQUESTS', self.requests
print ''
# for late requests
self.lateCount = 0
if len(self.lateRequests) > 0:
print 'LATE REQUESTS', self.lateRequests
print ''
if self.compute == False:
print ''
print 'For the requests above, compute the seek, rotate, and transfer times.'
print 'Use -c or the graphical mode (-G) to see the answers.'
print ''
# BINDINGS
if self.graphics:
self.root.bind('s', self.Start)
self.root.bind('p', self.Pause)
self.root.bind('q', self.Exit)
# TRACK INFO
self.tracks = {}
self.trackWidth = 40
self.tracks[0] = 140
self.tracks[1] = self.tracks[0] - self.trackWidth
self.tracks[2] = self.tracks[1] - self.trackWidth
if (self.seekSpeed > 1 and self.trackWidth % self.seekSpeed != 0):
print 'Seek speed (%d) must divide evenly into track width (%d)' % (self.seekSpeed, self.trackWidth)
sys.exit(1)
if self.seekSpeed < 1:
x = (self.trackWidth / self.seekSpeed)
y = int(float(self.trackWidth) / float(self.seekSpeed))
if float(x) != float(y):
print 'Seek speed (%d) must divide evenly into track width (%d)' % (self.seekSpeed, self.trackWidth)
sys.exit(1)
# DISK SURFACE
self.cx = self.width/2.0
self.cy = self.width/2.0
if self.graphics:
self.canvas.create_rectangle(self.cx-175, 30, self.cx - 20, 80, fill='gray', outline='black')
self.platterSize = 320
ps2 = self.platterSize / 2.0
if self.graphics:
self.canvas.create_oval(self.cx-ps2, self.cy-ps2, self.cx+ps2, self.cy + ps2, fill='darkgray', outline='black')
for i in range(len(self.tracks)):
t = self.tracks[i] - (self.trackWidth / 2.0)
if self.graphics:
self.canvas.create_oval(self.cx - t, self.cy - t, self.cx + t, self.cy + t, fill='', outline='black', width=1.0)
# SPINDLE
self.spindleX = self.cx
self.spindleY = self.cy
if self.graphics:
self.spindleID = self.canvas.create_oval(self.spindleX-3, self.spindleY-3, self.spindleX+3, self.spindleY+3, fill='orange', outline='black')
# DISK ARM
self.armTrack = 0
self.armSpeedBase = float(seekSpeed)
self.armSpeed = float(seekSpeed)
distFromSpindle = self.tracks[self.armTrack]
self.armWidth = 20
self.headWidth = 10
self.armX = self.spindleX - (distFromSpindle * math.cos(math.radians(0)))
self.armX1 = self.armX - self.armWidth
self.armX2 = self.armX + self.armWidth
self.armY1 = 50.0
self.armY2 = self.width / 2.0
self.headX1 = self.armX - self.headWidth
self.headX2 = self.armX + self.headWidth
self.headY1 = (self.width/2.0) - self.headWidth
self.headY2 = (self.width/2.0) + self.headWidth
if self.graphics:
self.armID = self.canvas.create_rectangle(self.armX1, self.armY1, self.armX2, self.armY2, fill='gray', outline='black')
self.headID = self.canvas.create_rectangle(self.headX1, self.headY1, self.headX2, self.headY2, fill='gray', outline='black')
self.targetSize = 10.0
if self.graphics:
sz = self.targetSize
self.targetID = self.canvas.create_oval(self.armX1-sz, self.armY1-sz, self.armX1+sz, self.armY1+sz, fill='orange', outline='')
# IO QUEUE
self.queueX = 20
self.queueY = 450
self.requestCount = 0
self.requestQueue = []
self.requestState = []
self.queueBoxSize = 20
self.queueBoxID = {}
self.queueTxtID = {}
# draw each box
for index in range(len(self.requests)):
self.AddQueueEntry(int(self.requests[index]), index)
if self.graphics:
self.canvas.create_text(self.queueX - 5, self.queueY - 20, anchor='w', text='Queue:')
# scheduling window
self.currWindow = self.window
# draw current limits of queue
if self.graphics:
self.windowID = -1
self.DrawWindow()
# initial scheduling info
self.currentIndex = -1
self.currentBlock = -1
# initial state of disk (vs seeking, rotating, transferring)
self.state = STATE_NULL
# DRAW BLOCKS on the TRACKS
for bid in range(len(self.blockInfoList)):
(track, angle, name) = self.blockInfoList[bid]
if self.graphics:
distFromSpindle = self.tracks[track]
xc = self.spindleX + (distFromSpindle * math.cos(math.radians(angle)))
yc = self.spindleY + (distFromSpindle * math.sin(math.radians(angle)))
cid = self.canvas.create_text(xc, yc, text=name, anchor='center')
else:
cid = -1
self.blockInfoList[bid] = (track, angle, name, cid)
# angle of rotation
self.angle = Decimal(0.0)
# TIME INFO
if self.graphics:
self.timeID = self.canvas.create_text(10, 10, text='Time: 0.00', anchor='w')
self.canvas.create_rectangle(95,0,200,18, fill='orange', outline='orange')
self.seekID = self.canvas.create_text(100, 10, text='Seek: 0.00', anchor='w')
self.canvas.create_rectangle(195,0,300,18, fill='lightblue', outline='lightblue')
self.rotID = self.canvas.create_text(200, 10, text='Rotate: 0.00', anchor='w')
self.canvas.create_rectangle(295,0,400,18, fill='green', outline='green')
self.xferID = self.canvas.create_text(300, 10, text='Transfer: 0.00', anchor='w')
self.canvas.create_text(320, 40, text='"s" to start', anchor='w')
self.canvas.create_text(320, 60, text='"p" to pause', anchor='w')
self.canvas.create_text(320, 80, text='"q" to quit', anchor='w')
self.timer = 0
# STATS
self.seekTotal = 0.0
self.rotTotal = 0.0
self.xferTotal = 0.0
# set up animation loop
if self.graphics:
self.doAnimate = True
else:
self.doAnimate = False
self.isDone = False
# call this to start simulation
def Go(self):
if options.graphics:
self.root.mainloop()
else:
self.GetNextIO()
while self.isDone == False:
self.Animate()
# crappy error message
def PrintAddrDescMessage(self, value):
print 'Bad address description (%s)' % value
print 'The address description must be a comma-separated list of length three, without spaces.'
print 'For example, "10,100,0" would indicate that 10 addresses should be generated, with'
print '100 as the maximum value, and 0 as the minimum. A max of -1 means just use the highest'
print 'possible value as the max address to generate.'
sys.exit(1)
#
# ZONES AND BLOCK LAYOUT
#
def InitBlockLayout(self):
self.blockInfoList = []
self.blockToTrackMap = {}
self.blockToAngleMap = {}
self.tracksBeginEnd = {}
self.blockAngleOffset = []
zones = self.zoning.split(',')
assert(len(zones) == 3)
for i in range(len(zones)):
self.blockAngleOffset.append(int(zones[i]) / 2)
track = 0 # outer track
angleOffset = 2 * self.blockAngleOffset[track]
for angle in range(0, 360, angleOffset):
block = angle / angleOffset
self.blockToTrackMap[block] = track
self.blockToAngleMap[block] = angle
self.blockInfoList.append((track, angle, block))
self.tracksBeginEnd[track] = (0, block)
pblock = block + 1
track = 1 # middle track
skew = self.skew
angleOffset = 2 * self.blockAngleOffset[track]
for angle in range(0, 360, angleOffset):
block = (angle / angleOffset) + pblock
self.blockToTrackMap[block] = track
self.blockToAngleMap[block] = angle + (angleOffset * skew)
self.blockInfoList.append((track, angle + (angleOffset * skew), block))
self.tracksBeginEnd[track] = (pblock, block)
pblock = block + 1
track = 2 # inner track
skew = 2 * self.skew
angleOffset = 2 * self.blockAngleOffset[track]
for angle in range(0, 360, angleOffset):
block = (angle / angleOffset) + pblock
self.blockToTrackMap[block] = track
self.blockToAngleMap[block] = angle + (angleOffset * skew)
self.blockInfoList.append((track, angle + (angleOffset * skew), block))
self.tracksBeginEnd[track] = (pblock, block)
self.maxBlock = pblock
# print 'MAX BLOCK:', self.maxBlock
# adjust angle to starting position relative
for i in self.blockToAngleMap:
self.blockToAngleMap[i] = (self.blockToAngleMap[i] + 180) % 360
# print 'btoa map', self.blockToAngleMap
# print 'btot map', self.blockToTrackMap
# print 'bao', self.blockAngleOffset
def MakeRequests(self, addr, addrDesc):
(numRequests, maxRequest, minRequest) = (0, 0, 0)
if addr == '-1':
# first extract values from descriptor
desc = addrDesc.split(',')
if len(desc) != 3:
self.PrintAddrDescMessage(addrDesc)
(numRequests, maxRequest, minRequest) = (int(desc[0]), int(desc[1]), int(desc[2]))
if maxRequest == -1:
maxRequest = self.maxBlock
# now make list
tmpList = []
for i in range(numRequests):
tmpList.append(int(random.random() * maxRequest) + minRequest)
return tmpList
else:
return addr.split(',')
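# Example (editor's addition): with addr='-1' and addrDesc='10,100,0',
# MakeRequests returns ten random block numbers in [0, 100); with
# addr='7,30,8' it simply returns the explicit list ['7', '30', '8'].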
#
# BUTTONS
#
def Start(self, event):
self.GetNextIO()
self.doAnimate = True
self.Animate()
def Pause(self, event):
if self.doAnimate == False:
self.doAnimate = True
else:
self.doAnimate = False
def Exit(self, event):
sys.exit(0)
#
# CORE SIMULATION and ANIMATION
#
def UpdateTime(self):
if self.graphics:
self.canvas.itemconfig(self.timeID, text='Time: ' + str(self.timer))
self.canvas.itemconfig(self.seekID, text='Seek: ' + str(self.seekTotal))
self.canvas.itemconfig(self.rotID, text='Rotate: ' + str(self.rotTotal))
self.canvas.itemconfig(self.xferID, text='Transfer: ' + str(self.xferTotal))
def AddRequest(self, block):
self.AddQueueEntry(block, len(self.requestQueue))
def QueueMap(self, index):
numPerRow = 400 / self.queueBoxSize
return (index % numPerRow, index / numPerRow)
def DrawWindow(self):
if self.window == -1:
return
(col, row) = self.QueueMap(self.currWindow)
if col == 0:
(col, row) = (20, row - 1)
if self.windowID != -1:
self.canvas.delete(self.windowID)
self.windowID = self.canvas.create_line(self.queueX + (col * 20) - 10, self.queueY - 13 + (row * 20),
self.queueX + (col * 20) - 10, self.queueY + 13 + (row * 20), width=2)
def AddQueueEntry(self, block, index):
self.requestQueue.append((block, index))
self.requestState.append(STATE_NULL)
if self.graphics:
(col, row) = self.QueueMap(index)
sizeHalf = self.queueBoxSize / 2.0
(cx, cy) = (self.queueX + (col * self.queueBoxSize), self.queueY + (row * self.queueBoxSize))
self.queueBoxID[index] = self.canvas.create_rectangle(cx - sizeHalf, cy - sizeHalf, cx + sizeHalf, cy + sizeHalf, fill='white')
self.queueTxtID[index] = self.canvas.create_text(cx, cy, anchor='center', text=str(block))
def SwitchColors(self, c):
if self.graphics:
self.canvas.itemconfig(self.queueBoxID[self.currentIndex], fill=c)
self.canvas.itemconfig(self.targetID, fill=c)
def SwitchState(self, newState):
self.state = newState
self.requestState[self.currentIndex] = newState
def RadiallyCloseTo(self, a1, a2):
if a1 > a2:
v = a1 - a2
else:
v = a2 - a1
if v < self.rotateSpeed:
return True
return False
def DoneWithTransfer(self):
angleOffset = self.blockAngleOffset[self.armTrack]
# if int(self.angle) == (self.blockToAngleMap[self.currentBlock] + angleOffset) % 360:
if self.RadiallyCloseTo(self.angle, Decimal((self.blockToAngleMap[self.currentBlock] + angleOffset) % 360)):
# print 'END TRANSFER', self.angle, self.timer
self.SwitchState(STATE_DONE)
self.requestCount += 1
return True
return False
def DoneWithRotation(self):
angleOffset = self.blockAngleOffset[self.armTrack]
# XXX there is a weird bug in here
# print self.timer, 'ROTATE:: ', self.currentBlock, 'currangle: ', self.angle, ' - mapangle: ', self.blockToAngleMap[self.currentBlock]
# print ' angleOffset ', angleOffset
# print ' blockMap ', (self.blockToAngleMap[self.currentBlock] - angleOffset) % 360
# print ' self.angle ', self.angle, int(self.angle)
# if int(self.angle) == (self.blockToAngleMap[self.currentBlock] - angleOffset) % 360:
if self.RadiallyCloseTo(self.angle, Decimal((self.blockToAngleMap[self.currentBlock] - angleOffset) % 360)):
self.SwitchState(STATE_XFER)
# print ' --> DONE WITH ROTATION!', self.timer
return True
return False
def PlanSeek(self, track):
self.seekBegin = self.timer
self.SwitchColors('orange')
self.SwitchState(STATE_SEEK)
if track == self.armTrack:
self.rotBegin = self.timer
self.SwitchColors('lightblue')
self.SwitchState(STATE_ROTATE)
return
self.armTarget = track
self.armTargetX1 = self.spindleX - self.tracks[track] - (self.trackWidth / 2.0)
if track >= self.armTrack:
self.armSpeed = self.armSpeedBase
else:
self.armSpeed = - self.armSpeedBase
def DoneWithSeek(self):
# move the disk arm
self.armX1 += self.armSpeed
self.armX2 += self.armSpeed
self.headX1 += self.armSpeed
self.headX2 += self.armSpeed
# update it on screen
if self.graphics:
self.canvas.coords(self.armID, self.armX1, self.armY1, self.armX2, self.armY2)
self.canvas.coords(self.headID, self.headX1, self.headY1, self.headX2, self.headY2)
# check if done
if (self.armSpeed > 0.0 and self.armX1 >= self.armTargetX1) or (self.armSpeed < 0.0 and self.armX1 <= self.armTargetX1):
self.armTrack = self.armTarget
return True
return False
def DoSATF(self, rList):
minBlock = -1
minIndex = -1
minEst = -1
# print '**** DoSATF ****', rList
for (block, index) in rList:
if self.requestState[index] == STATE_DONE:
continue
track = self.blockToTrackMap[block]
angle = self.blockToAngleMap[block]
# print 'track', track, 'angle', angle
# estimate seek time
dist = int(math.fabs(self.armTrack - track))
seekEst = Decimal(self.trackWidth / self.armSpeedBase) * dist
# estimate rotate time
angleOffset = self.blockAngleOffset[track]
angleAtArrival = (Decimal(self.angle) + (seekEst * self.rotateSpeed))
while angleAtArrival > 360.0:
angleAtArrival -= 360.0
rotDist = Decimal((angle - angleOffset) - angleAtArrival)
while rotDist > 360.0:
rotDist -= Decimal(360.0)
while rotDist < 0.0:
rotDist += Decimal(360.0)
rotEst = rotDist / self.rotateSpeed
# finally, transfer
xferEst = (Decimal(angleOffset) * Decimal(2.0)) / self.rotateSpeed
totalEst = seekEst + rotEst + xferEst
# should probably pick one on same track in case of a TIE
if minEst == -1 or totalEst < minEst:
minEst = totalEst
minBlock = block
minIndex = index
# END loop
# when done
self.totalEst = minEst
assert(minBlock != -1)
assert(minIndex != -1)
return (minBlock, minIndex)
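# Worked example (editor's addition; numbers assume the hard-coded
# trackWidth of 40 with seekSpeed=1 and rotateSpeed=1): a request one track
# away costs seekEst = 40; if the target sector is then still 90 degrees
# ahead of the head, rotEst = 90; a 30-degree block gives xferEst = 30,
# so totalEst = 160 time units for that candidate.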
#
# actually doesn't quite do SSTF
# just finds all the blocks on the nearest track
# (whatever that may be) and returns it as a list
#
def DoSSTF(self, rList):
minDist = MAXTRACKS
minBlock = -1
trackList = [] # all the blocks on a track
for (block, index) in rList:
if self.requestState[index] == STATE_DONE:
continue
track = self.blockToTrackMap[block]
dist = int(math.fabs(self.armTrack - track))
if dist < minDist:
trackList = []
trackList.append((block, index))
minDist = dist
elif dist == minDist:
trackList.append((block, index))
assert(trackList != [])
return trackList
def UpdateWindow(self):
if self.fairWindow == -1 and self.currWindow > 0 and self.currWindow < len(self.requestQueue):
self.currWindow += 1
if self.graphics:
self.DrawWindow()
def GetWindow(self):
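# currWindow <= -1 means schedule over the entire request queue. With
# fairWindow set (BSATF), the window grows by fairWindow blocks after
# every fairWindow completed requests, which bounds how long a request
# can be starved by closer ones.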
if self.currWindow <= -1:
return len(self.requestQueue)
else:
if self.fairWindow != -1:
if self.requestCount > 0 and (self.requestCount % self.fairWindow == 0):
self.currWindow = self.currWindow + self.fairWindow
if self.currWindow > len(self.requestQueue):
self.currWindow = len(self.requestQueue)
if self.graphics:
self.DrawWindow()
return self.currWindow
else:
return self.currWindow
def GetNextIO(self):
# check if done: if so, print stats and end animation
if self.requestCount == len(self.requestQueue):
self.UpdateTime()
self.PrintStats()
self.doAnimate = False
self.isDone = True
return
# do policy: should set currentBlock,
if self.policy == 'FIFO':
(self.currentBlock, self.currentIndex) = self.requestQueue[self.requestCount]
self.DoSATF(self.requestQueue[self.requestCount:self.requestCount+1])
elif self.policy == 'SATF' or self.policy == 'BSATF':
(self.currentBlock, self.currentIndex) = self.DoSATF(self.requestQueue[0:self.GetWindow()])
elif self.policy == 'SSTF':
# first, find all the blocks on a given track (given window constraints)
trackList = self.DoSSTF(self.requestQueue[0:self.GetWindow()])
# then, do SATF on those blocks (otherwise, will not do them in obvious order)
(self.currentBlock, self.currentIndex) = self.DoSATF(trackList)
else:
print 'policy (%s) not implemented' % self.policy
sys.exit(1)
# once best block is decided, go ahead and do the seek
self.PlanSeek(self.blockToTrackMap[self.currentBlock])
# add another block?
if len(self.lateRequests) > 0 and self.lateCount < len(self.lateRequests):
self.AddRequest(self.lateRequests[self.lateCount])
self.lateCount += 1
def Animate(self):
if self.graphics == True and self.doAnimate == False:
self.root.after(20, self.Animate)
return
# timer
self.timer += 1
self.UpdateTime()
# see which blocks are rotating on the disk
# print 'SELF ANGLE', self.angle
self.angle = Decimal(self.angle + self.rotateSpeed)
if self.angle >= 360.0:
self.angle = Decimal(0.0)
# move the blocks
if self.graphics:
for (track, angle, name, cid) in self.blockInfoList:
distFromSpindle = self.tracks[track]
na = angle - self.angle
xc = self.spindleX + (distFromSpindle * math.cos(math.radians(na)))
yc = self.spindleY + (distFromSpindle * math.sin(math.radians(na)))
if self.graphics:
self.canvas.coords(cid, xc, yc)
if self.currentBlock == name:
sz = self.targetSize
self.canvas.coords(self.targetID, xc-sz, yc-sz, xc+sz, yc+sz)
# move the arm OR wait for a rotational delay
if self.state == STATE_SEEK:
if self.DoneWithSeek():
self.rotBegin = self.timer
self.SwitchState(STATE_ROTATE)
self.SwitchColors('lightblue')
if self.state == STATE_ROTATE:
# check for read (disk arm must be settled)
if self.DoneWithRotation():
self.xferBegin = self.timer
self.SwitchState(STATE_XFER)
self.SwitchColors('green')
if self.state == STATE_XFER:
if self.DoneWithTransfer():
self.DoRequestStats()
self.SwitchState(STATE_DONE)
self.SwitchColors('red')
self.UpdateWindow()
currentBlock = self.currentBlock
self.GetNextIO()
nextBlock = self.currentBlock
if self.blockToTrackMap[currentBlock] == self.blockToTrackMap[nextBlock]:
if (currentBlock == self.tracksBeginEnd[self.armTrack][1] and nextBlock == self.tracksBeginEnd[self.armTrack][0]) or (currentBlock + 1 == nextBlock):
# need a special case here: to handle when we stay in transfer mode
(self.rotBegin, self.seekBegin, self.xferBegin) = (self.timer, self.timer, self.timer)
self.SwitchState(STATE_XFER)
self.SwitchColors('green')
# make sure to keep the animation going!
if self.graphics:
self.root.after(20, self.Animate)
def DoRequestStats(self):
seekTime = self.rotBegin - self.seekBegin
rotTime = self.xferBegin - self.rotBegin
xferTime = self.timer - self.xferBegin
totalTime = self.timer - self.seekBegin
if self.compute == True:
print 'Block: %3d Seek:%3d Rotate:%3d Transfer:%3d Total:%4d' % (self.currentBlock, seekTime, rotTime, xferTime, totalTime)
# if int(totalTime) != int(self.totalEst):
# print 'INTERNAL ERROR: estimate was', self.totalEst, 'whereas actual time to access block was', totalTime
# print 'Please report this bug and as much information as possible so as to make it easy to recreate. Thanks!'
# update stats
self.seekTotal += seekTime
self.rotTotal += rotTime
self.xferTotal += xferTime
def PrintStats(self):
if self.compute == True:
print '\nTOTALS Seek:%3d Rotate:%3d Transfer:%3d Total:%4d\n' % (self.seekTotal, self.rotTotal, self.xferTotal, self.timer)
# END: class Disk
#
# MAIN SIMULATOR
#
parser = OptionParser()
parser.add_option('-s', '--seed', default='0', help='Random seed', action='store', type='int', dest='seed')
parser.add_option('-a', '--addr', default='-1', help='Request list (comma-separated) [-1 -> use addrDesc]', action='store', type='string', dest='addr')
parser.add_option('-A', '--addrDesc', default='5,-1,0', help='Num requests, max request (-1->all), min request', action='store', type='string', dest='addrDesc')
parser.add_option('-S', '--seekSpeed', default='1', help='Speed of seek', action='store', type='string', dest='seekSpeed')
parser.add_option('-R', '--rotSpeed', default='1', help='Speed of rotation', action='store', type='string', dest='rotateSpeed')
parser.add_option('-p', '--policy', default='FIFO', help='Scheduling policy (FIFO, SSTF, SATF, BSATF)', action='store', type='string', dest='policy')
parser.add_option('-w', '--schedWindow', default=-1, help='Size of scheduling window (-1 -> all)', action='store', type='int', dest='window')
parser.add_option('-o', '--skewOffset', default=0, help='Amount of skew (in blocks)', action='store', type='int', dest='skew')
parser.add_option('-z', '--zoning', default='30,30,30', help='Angles between blocks on outer,middle,inner tracks', action='store', type='string', dest='zoning')
parser.add_option('-G', '--graphics', default=False, help='Turn on graphics', action='store_true', dest='graphics')
parser.add_option('-l', '--lateAddr', default='-1', help='Late: request list (comma-separated) [-1 -> random]', action='store', type='string', dest='lateAddr')
parser.add_option('-L', '--lateAddrDesc', default='0,-1,0', help='Num requests, max request (-1->all), min request', action='store', type='string', dest='lateAddrDesc')
parser.add_option('-c', '--compute', default=False, help='Compute the answers', action='store_true', dest='compute')
(options, args) = parser.parse_args()
print 'OPTIONS seed', options.seed
print 'OPTIONS addr', options.addr
print 'OPTIONS addrDesc', options.addrDesc
print 'OPTIONS seekSpeed', options.seekSpeed
print 'OPTIONS rotateSpeed', options.rotateSpeed
print 'OPTIONS skew', options.skew
print 'OPTIONS window', options.window
print 'OPTIONS policy', options.policy
print 'OPTIONS compute', options.compute
print 'OPTIONS graphics', options.graphics
print 'OPTIONS zoning', options.zoning
print 'OPTIONS lateAddr', options.lateAddr
print 'OPTIONS lateAddrDesc', options.lateAddrDesc
print ''
if options.window == 0:
print 'Scheduling window (%d) must be positive or -1 (which means a full window)' % options.window
sys.exit(1)
if options.graphics and options.compute == False:
print '\nWARNING: Setting compute flag to True, as graphics are on\n'
options.compute = True
# set up simulator info
d = Disk(addr=options.addr, addrDesc=options.addrDesc, lateAddr=options.lateAddr, lateAddrDesc=options.lateAddrDesc,
policy=options.policy, seekSpeed=Decimal(options.seekSpeed), rotateSpeed=Decimal(options.rotateSpeed),
skew=options.skew, window=options.window, compute=options.compute, graphics=options.graphics, zoning=options.zoning)
# run simulation
d.Go()
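# Example invocation (illustrative; assumes the file is saved as disk.py):
#   python disk.py -a 10,11,12,13 -p SATF -c
# prints per-request Seek/Rotate/Transfer/Total times for the four blocks
# under the SATF policy, followed by the TOTALS summary line.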
|
googleads/google-ads-python
|
refs/heads/master
|
google/ads/googleads/v8/enums/types/call_tracking_display_location.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.enums",
marshal="google.ads.googleads.v8",
manifest={"CallTrackingDisplayLocationEnum",},
)
class CallTrackingDisplayLocationEnum(proto.Message):
r"""Container for enum describing possible call tracking display
locations.
"""
class CallTrackingDisplayLocation(proto.Enum):
r"""Possible call tracking display locations."""
UNSPECIFIED = 0
UNKNOWN = 1
AD = 2
LANDING_PAGE = 3
__all__ = tuple(sorted(__protobuf__.manifest))
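# Illustrative usage (not part of the generated file): proto-plus enums are
# IntEnum subclasses, so members compare equal to their wire integers.
#   loc = CallTrackingDisplayLocationEnum.CallTrackingDisplayLocation.AD
#   assert loc == 2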
|
xsynergy510x/android_external_chromium_org
|
refs/heads/cm-12.1
|
third_party/motemplate/motemplate.py
|
93
|
# Copyright 2012 Benjamin Kalman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Escaping control characters somehow. e.g. \{{, \{{-.
import json
import re
'''Motemplate templates are data binding templates more-than-loosely inspired by
ctemplate. Use like:
from motemplate import Motemplate
template = Motemplate('hello {{#foo bar/}} world')
input = {
'foo': [
{ 'bar': 1 },
{ 'bar': 2 },
{ 'bar': 3 }
]
}
print(template.render(input).text)
Motemplate will use get() on contexts to return values, so to create custom
getters (for example, something that populates values lazily from keys), just
provide an object with a get() method.
class CustomContext(object):
def get(self, key):
return 10
print(Motemplate('hello {{world}}').render(CustomContext()).text)
will print 'hello 10'.
'''
class ParseException(Exception):
'''The exception thrown while parsing a template.
'''
def __init__(self, error):
Exception.__init__(self, error)
class RenderResult(object):
'''The result of a render operation.
'''
def __init__(self, text, errors):
self.text = text
self.errors = errors
def __repr__(self):
return '%s(text=%s, errors=%s)' % (type(self).__name__,
self.text,
self.errors)
def __str__(self):
return repr(self)
class _StringBuilder(object):
'''Efficiently builds strings.
'''
def __init__(self):
self._buf = []
def __len__(self):
self._Collapse()
return len(self._buf[0])
def Append(self, string):
if not isinstance(string, basestring):
string = str(string)
self._buf.append(string)
def ToString(self):
self._Collapse()
return self._buf[0]
def _Collapse(self):
self._buf = [u''.join(self._buf)]
def __repr__(self):
return self.ToString()
def __str__(self):
return repr(self)
class _Contexts(object):
'''Tracks a stack of context objects, providing efficient key/value retrieval.
'''
class _Node(object):
'''A node within the stack. Wraps a real context and maintains the key/value
pairs seen so far.
'''
def __init__(self, value):
self._value = value
self._value_has_get = hasattr(value, 'get')
self._found = {}
def GetKeys(self):
'''Returns the list of keys that |_value| contains.
'''
return self._found.keys()
def Get(self, key):
'''Returns the value for |key|, or None if not found (including if
|_value| doesn't support key retrieval).
'''
if not self._value_has_get:
return None
value = self._found.get(key)
if value is not None:
return value
value = self._value.get(key)
if value is not None:
self._found[key] = value
return value
def __repr__(self):
return 'Node(value=%s, found=%s)' % (self._value, self._found)
def __str__(self):
return repr(self)
def __init__(self, globals_):
'''Initializes with the initial global contexts, listed in order from most
to least important.
'''
self._nodes = map(_Contexts._Node, globals_)
self._first_local = len(self._nodes)
self._value_info = {}
def CreateFromGlobals(self):
new = _Contexts([])
new._nodes = self._nodes[:self._first_local]
new._first_local = self._first_local
return new
def Push(self, context):
self._nodes.append(_Contexts._Node(context))
def Pop(self):
node = self._nodes.pop()
assert len(self._nodes) >= self._first_local
for found_key in node.GetKeys():
# [0] is the stack of nodes that |found_key| has been found in.
self._value_info[found_key][0].pop()
def FirstLocal(self):
if len(self._nodes) == self._first_local:
return None
return self._nodes[-1]._value
def Resolve(self, path):
# This method is only efficient at finding |key|; if |tail| has a value (and
# |key| evaluates to an indexable value) we'll need to descend into that.
key, tail = path.split('.', 1) if '.' in path else (path, None)
found = self._FindNodeValue(key)
if tail is None:
return found
for part in tail.split('.'):
if not hasattr(found, 'get'):
return None
found = found.get(part)
return found
def Scope(self, context, fn, *args):
self.Push(context)
try:
return fn(*args)
finally:
self.Pop()
def _FindNodeValue(self, key):
# |found_node_list| will be all the nodes that |key| has been found in.
# |checked_node_set| are those that have been checked.
info = self._value_info.get(key)
if info is None:
info = ([], set())
self._value_info[key] = info
found_node_list, checked_node_set = info
# Check all the nodes not yet checked for |key|.
newly_found = []
for node in reversed(self._nodes):
if node in checked_node_set:
break
value = node.Get(key)
if value is not None:
newly_found.append(node)
checked_node_set.add(node)
# The nodes will have been found in reverse stack order. After extending
# the found nodes, the freshest value will be at the tip of the stack.
found_node_list.extend(reversed(newly_found))
if not found_node_list:
return None
return found_node_list[-1]._value.get(key)
class _Stack(object):
class Entry(object):
def __init__(self, name, id_):
self.name = name
self.id_ = id_
def __init__(self, entries=[]):
self.entries = entries
def Descend(self, name, id_):
descended = list(self.entries)
descended.append(_Stack.Entry(name, id_))
return _Stack(entries=descended)
class _InternalContext(object):
def __init__(self):
self._render_state = None
def SetRenderState(self, render_state):
self._render_state = render_state
def get(self, key):
if key == 'errors':
errors = self._render_state._errors
return '\n'.join(errors) if errors else None
return None
class _RenderState(object):
'''The state of a render call.
'''
def __init__(self, name, contexts, _stack=_Stack()):
self.text = _StringBuilder()
self.contexts = contexts
self._name = name
self._errors = []
self._stack = _stack
def AddResolutionError(self, id_, description=None):
message = id_.CreateResolutionErrorMessage(self._name, stack=self._stack)
if description is not None:
message = '%s (%s)' % (message, description)
self._errors.append(message)
def Copy(self):
return _RenderState(
self._name, self.contexts, _stack=self._stack)
def ForkPartial(self, custom_name, id_):
name = custom_name or id_.name
return _RenderState(name,
self.contexts.CreateFromGlobals(),
_stack=self._stack.Descend(name, id_))
def Merge(self, render_state, text_transform=None):
self._errors.extend(render_state._errors)
text = render_state.text.ToString()
if text_transform is not None:
text = text_transform(text)
self.text.Append(text)
def GetResult(self):
return RenderResult(self.text.ToString(), self._errors)
class _Identifier(object):
'''An identifier of the form 'foo', 'foo.bar.baz', 'foo-bar.baz', etc.
'''
_VALID_ID_MATCHER = re.compile(r'^[a-zA-Z0-9@_/-]+$')
def __init__(self, name, line, column):
self.name = name
self.line = line
self.column = column
if name == '':
raise ParseException('Empty identifier %s' % self.GetDescription())
for part in name.split('.'):
if not _Identifier._VALID_ID_MATCHER.match(part):
raise ParseException('Invalid identifier %s' % self.GetDescription())
def GetDescription(self):
return '\'%s\' at line %s column %s' % (self.name, self.line, self.column)
def CreateResolutionErrorMessage(self, name, stack=None):
message = _StringBuilder()
message.Append('Failed to resolve %s in %s\n' % (self.GetDescription(),
name))
if stack is not None:
for entry in reversed(stack.entries):
message.Append(' included as %s in %s\n' % (entry.id_.GetDescription(),
entry.name))
return message.ToString().strip()
def __repr__(self):
return self.name
def __str__(self):
return repr(self)
class _Node(object): pass
class _LeafNode(_Node):
def __init__(self, start_line, end_line):
self._start_line = start_line
self._end_line = end_line
def StartsWithNewLine(self):
return False
def TrimStartingNewLine(self):
pass
def TrimEndingSpaces(self):
return 0
def TrimEndingNewLine(self):
pass
def EndsWithEmptyLine(self):
return False
def GetStartLine(self):
return self._start_line
def GetEndLine(self):
return self._end_line
def __str__(self):
return repr(self)
class _DecoratorNode(_Node):
def __init__(self, content):
self._content = content
def StartsWithNewLine(self):
return self._content.StartsWithNewLine()
def TrimStartingNewLine(self):
self._content.TrimStartingNewLine()
def TrimEndingSpaces(self):
return self._content.TrimEndingSpaces()
def TrimEndingNewLine(self):
self._content.TrimEndingNewLine()
def EndsWithEmptyLine(self):
return self._content.EndsWithEmptyLine()
def GetStartLine(self):
return self._content.GetStartLine()
def GetEndLine(self):
return self._content.GetEndLine()
def __repr__(self):
return str(self._content)
def __str__(self):
return repr(self)
class _InlineNode(_DecoratorNode):
def __init__(self, content):
_DecoratorNode.__init__(self, content)
def Render(self, render_state):
content_render_state = render_state.Copy()
self._content.Render(content_render_state)
render_state.Merge(content_render_state,
text_transform=lambda text: text.replace('\n', ''))
class _IndentedNode(_DecoratorNode):
def __init__(self, content, indentation):
_DecoratorNode.__init__(self, content)
self._indent_str = ' ' * indentation
def Render(self, render_state):
if isinstance(self._content, _CommentNode):
return
def inlinify(text):
if len(text) == 0: # avoid rendering a blank line
return ''
buf = _StringBuilder()
buf.Append(self._indent_str)
buf.Append(text.replace('\n', '\n%s' % self._indent_str))
if not text.endswith('\n'): # partials will often already end in a \n
buf.Append('\n')
return buf.ToString()
content_render_state = render_state.Copy()
self._content.Render(content_render_state)
render_state.Merge(content_render_state, text_transform=inlinify)
class _BlockNode(_DecoratorNode):
def __init__(self, content):
_DecoratorNode.__init__(self, content)
content.TrimStartingNewLine()
content.TrimEndingSpaces()
def Render(self, render_state):
self._content.Render(render_state)
class _NodeCollection(_Node):
def __init__(self, nodes):
assert nodes
self._nodes = nodes
def Render(self, render_state):
for node in self._nodes:
node.Render(render_state)
def StartsWithNewLine(self):
return self._nodes[0].StartsWithNewLine()
def TrimStartingNewLine(self):
self._nodes[0].TrimStartingNewLine()
def TrimEndingSpaces(self):
return self._nodes[-1].TrimEndingSpaces()
def TrimEndingNewLine(self):
self._nodes[-1].TrimEndingNewLine()
def EndsWithEmptyLine(self):
return self._nodes[-1].EndsWithEmptyLine()
def GetStartLine(self):
return self._nodes[0].GetStartLine()
def GetEndLine(self):
return self._nodes[-1].GetEndLine()
def __repr__(self):
return ''.join(str(node) for node in self._nodes)
class _StringNode(_Node):
'''Just a string.
'''
def __init__(self, string, start_line, end_line):
self._string = string
self._start_line = start_line
self._end_line = end_line
def Render(self, render_state):
render_state.text.Append(self._string)
def StartsWithNewLine(self):
return self._string.startswith('\n')
def TrimStartingNewLine(self):
if self.StartsWithNewLine():
self._string = self._string[1:]
def TrimEndingSpaces(self):
original_length = len(self._string)
self._string = self._string[:self._LastIndexOfSpaces()]
return original_length - len(self._string)
def TrimEndingNewLine(self):
if self._string.endswith('\n'):
self._string = self._string[:len(self._string) - 1]
def EndsWithEmptyLine(self):
index = self._LastIndexOfSpaces()
return index == 0 or self._string[index - 1] == '\n'
def _LastIndexOfSpaces(self):
index = len(self._string)
while index > 0 and self._string[index - 1] == ' ':
index -= 1
return index
def GetStartLine(self):
return self._start_line
def GetEndLine(self):
return self._end_line
def __repr__(self):
return self._string
class _EscapedVariableNode(_LeafNode):
'''{{foo}}
'''
def __init__(self, id_):
_LeafNode.__init__(self, id_.line, id_.line)
self._id = id_
def Render(self, render_state):
value = render_state.contexts.Resolve(self._id.name)
if value is None:
render_state.AddResolutionError(self._id)
return
string = value if isinstance(value, basestring) else str(value)
render_state.text.Append(string.replace('&', '&')
.replace('<', '<')
.replace('>', '>'))
def __repr__(self):
return '{{%s}}' % self._id
class _UnescapedVariableNode(_LeafNode):
'''{{{foo}}}
'''
def __init__(self, id_):
_LeafNode.__init__(self, id_.line, id_.line)
self._id = id_
def Render(self, render_state):
value = render_state.contexts.Resolve(self._id.name)
if value is None:
render_state.AddResolutionError(self._id)
return
string = value if isinstance(value, basestring) else str(value)
render_state.text.Append(string)
def __repr__(self):
return '{{{%s}}}' % self._id
class _CommentNode(_LeafNode):
'''{{- This is a comment -}}
An empty placeholder node for correct indented rendering behaviour.
'''
def __init__(self, start_line, end_line):
_LeafNode.__init__(self, start_line, end_line)
def Render(self, render_state):
pass
def __repr__(self):
return '<comment>'
class _SectionNode(_DecoratorNode):
'''{{#var:foo}} ... {{/foo}}
'''
def __init__(self, bind_to, id_, content):
_DecoratorNode.__init__(self, content)
self._bind_to = bind_to
self._id = id_
def Render(self, render_state):
value = render_state.contexts.Resolve(self._id.name)
if isinstance(value, list):
for item in value:
if self._bind_to is not None:
render_state.contexts.Scope({self._bind_to.name: item},
self._content.Render, render_state)
else:
self._content.Render(render_state)
elif hasattr(value, 'get'):
if self._bind_to is not None:
render_state.contexts.Scope({self._bind_to.name: value},
self._content.Render, render_state)
else:
render_state.contexts.Scope(value, self._content.Render, render_state)
else:
render_state.AddResolutionError(self._id)
def __repr__(self):
return '{{#%s}}%s{{/%s}}' % (
self._id, _DecoratorNode.__repr__(self), self._id)
class _VertedSectionNode(_DecoratorNode):
'''{{?var:foo}} ... {{/foo}}
'''
def __init__(self, bind_to, id_, content):
_DecoratorNode.__init__(self, content)
self._bind_to = bind_to
self._id = id_
def Render(self, render_state):
value = render_state.contexts.Resolve(self._id.name)
if _VertedSectionNode.ShouldRender(value):
if self._bind_to is not None:
render_state.contexts.Scope({self._bind_to.name: value},
self._content.Render, render_state)
else:
self._content.Render(render_state)
def __repr__(self):
return '{{?%s}}%s{{/%s}}' % (
self._id, _DecoratorNode.__repr__(self), self._id)
@staticmethod
def ShouldRender(value):
if value is None:
return False
if isinstance(value, bool):
return value
if isinstance(value, list):
return len(value) > 0
return True
class _InvertedSectionNode(_DecoratorNode):
'''{{^foo}} ... {{/foo}}
'''
def __init__(self, bind_to, id_, content):
_DecoratorNode.__init__(self, content)
if bind_to is not None:
raise ParseException('{{^%s:%s}} does not support variable binding'
% (bind_to, id_))
self._id = id_
def Render(self, render_state):
value = render_state.contexts.Resolve(self._id.name)
if not _VertedSectionNode.ShouldRender(value):
self._content.Render(render_state)
def __repr__(self):
return '{{^%s}}%s{{/%s}}' % (
self._id, _DecoratorNode.__repr__(self), self._id)
class _AssertionNode(_LeafNode):
'''{{!foo Some comment about foo}}
'''
def __init__(self, id_, description):
_LeafNode.__init__(self, id_.line, id_.line)
self._id = id_
self._description = description
def Render(self, render_state):
if render_state.contexts.Resolve(self._id.name) is None:
render_state.AddResolutionError(self._id, description=self._description)
def __repr__(self):
return '{{!%s %s}}' % (self._id, self._description)
class _JsonNode(_LeafNode):
'''{{*foo}}
'''
def __init__(self, id_):
_LeafNode.__init__(self, id_.line, id_.line)
self._id = id_
def Render(self, render_state):
value = render_state.contexts.Resolve(self._id.name)
if value is None:
render_state.AddResolutionError(self._id)
return
render_state.text.Append(json.dumps(value, separators=(',',':')))
def __repr__(self):
return '{{*%s}}' % self._id
class _PartialNodeWithArguments(_DecoratorNode):
def __init__(self, partial, args):
if isinstance(partial, Motemplate):
# Preserve any get() method that the caller has added.
if hasattr(partial, 'get'):
self.get = partial.get
partial = partial._top_node
_DecoratorNode.__init__(self, partial)
self._partial = partial
self._args = args
def Render(self, render_state):
render_state.contexts.Scope(self._args, self._partial.Render, render_state)
class _PartialNodeInContext(_DecoratorNode):
def __init__(self, partial, context):
if isinstance(partial, Motemplate):
# Preserve any get() method that the caller has added.
if hasattr(partial, 'get'):
self.get = partial.get
partial = partial._top_node
_DecoratorNode.__init__(self, partial)
self._partial = partial
self._context = context
def Render(self, render_state):
original_contexts = render_state.contexts
try:
render_state.contexts = self._context
render_state.contexts.Scope(
# The first local context of |original_contexts| will be the
# arguments that were passed to the partial, if any.
original_contexts.FirstLocal() or {},
self._partial.Render, render_state)
finally:
render_state.contexts = original_contexts
class _PartialNode(_LeafNode):
'''{{+var:foo}} ... {{/foo}}
'''
def __init__(self, bind_to, id_, content):
_LeafNode.__init__(self, id_.line, id_.line)
self._bind_to = bind_to
self._id = id_
self._content = content
self._args = None
self._pass_through_id = None
@classmethod
def Inline(cls, id_):
return cls(None, id_, None)
def Render(self, render_state):
value = render_state.contexts.Resolve(self._id.name)
if value is None:
render_state.AddResolutionError(self._id)
return
if not isinstance(value, (Motemplate, _Node)):
render_state.AddResolutionError(self._id, description='not a partial')
return
if isinstance(value, Motemplate):
node, name = value._top_node, value._name
else:
node, name = value, None
partial_render_state = render_state.ForkPartial(name, self._id)
arg_context = {}
if self._pass_through_id is not None:
context = render_state.contexts.Resolve(self._pass_through_id.name)
if context is not None:
arg_context[self._pass_through_id.name] = context
if self._args is not None:
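# Arguments were parsed by _ParsePartialNodeArgs: plain values are
# identifiers to resolve against the current contexts, while a dict value
# came from the nested key:(partial args) syntax and is wrapped so the
# inner partial renders with its own resolved arguments.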
def resolve_args(args):
resolved = {}
for key, value in args.iteritems():
if isinstance(value, dict):
assert len(value.keys()) == 1
id_of_partial, partial_args = value.items()[0]
partial = render_state.contexts.Resolve(id_of_partial.name)
if partial is not None:
resolved[key] = _PartialNodeWithArguments(
partial, resolve_args(partial_args))
else:
context = render_state.contexts.Resolve(value.name)
if context is not None:
resolved[key] = context
return resolved
arg_context.update(resolve_args(self._args))
if self._bind_to and self._content:
arg_context[self._bind_to.name] = _PartialNodeInContext(
self._content, render_state.contexts)
if arg_context:
partial_render_state.contexts.Push(arg_context)
node.Render(partial_render_state)
render_state.Merge(
partial_render_state,
text_transform=lambda text: text[:-1] if text.endswith('\n') else text)
def SetArguments(self, args):
self._args = args
def PassThroughArgument(self, id_):
self._pass_through_id = id_
def __repr__(self):
return '{{+%s}}' % self._id
_TOKENS = {}
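# Registry mapping literal token text (e.g. '{{#') to its _Token.Data
# instance; each Data registers itself here on construction, and the
# tokeniser matches 3- then 2-character slices against this table.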
class _Token(object):
'''The tokens that can appear in a template.
'''
class Data(object):
def __init__(self, name, text, clazz):
self.name = name
self.text = text
self.clazz = clazz
_TOKENS[text] = self
def ElseNodeClass(self):
if self.clazz == _VertedSectionNode:
return _InvertedSectionNode
if self.clazz == _InvertedSectionNode:
return _VertedSectionNode
return None
def __repr__(self):
return self.name
def __str__(self):
return repr(self)
OPEN_START_SECTION = Data(
'OPEN_START_SECTION' , '{{#', _SectionNode)
OPEN_START_VERTED_SECTION = Data(
'OPEN_START_VERTED_SECTION' , '{{?', _VertedSectionNode)
OPEN_START_INVERTED_SECTION = Data(
'OPEN_START_INVERTED_SECTION', '{{^', _InvertedSectionNode)
OPEN_ASSERTION = Data(
'OPEN_ASSERTION' , '{{!', _AssertionNode)
OPEN_JSON = Data(
'OPEN_JSON' , '{{*', _JsonNode)
OPEN_PARTIAL = Data(
'OPEN_PARTIAL' , '{{+', _PartialNode)
OPEN_ELSE = Data(
'OPEN_ELSE' , '{{:', None)
OPEN_END_SECTION = Data(
'OPEN_END_SECTION' , '{{/', None)
INLINE_END_SECTION = Data(
'INLINE_END_SECTION' , '/}}', None)
OPEN_UNESCAPED_VARIABLE = Data(
'OPEN_UNESCAPED_VARIABLE' , '{{{', _UnescapedVariableNode)
CLOSE_MUSTACHE3 = Data(
'CLOSE_MUSTACHE3' , '}}}', None)
OPEN_COMMENT = Data(
'OPEN_COMMENT' , '{{-', _CommentNode)
CLOSE_COMMENT = Data(
'CLOSE_COMMENT' , '-}}', None)
OPEN_VARIABLE = Data(
'OPEN_VARIABLE' , '{{' , _EscapedVariableNode)
CLOSE_MUSTACHE = Data(
'CLOSE_MUSTACHE' , '}}' , None)
CHARACTER = Data(
'CHARACTER' , '.' , None)
class _TokenStream(object):
'''Tokeniser for template parsing.
'''
def __init__(self, string):
self.next_token = None
self.next_line = 1
self.next_column = 0
self._string = string
self._cursor = 0
self.Advance()
def HasNext(self):
return self.next_token is not None
def NextCharacter(self):
if self.next_token is _Token.CHARACTER:
return self._string[self._cursor - 1]
return None
def Advance(self):
if self._cursor > 0 and self._string[self._cursor - 1] == '\n':
self.next_line += 1
self.next_column = 0
elif self.next_token is not None:
self.next_column += len(self.next_token.text)
self.next_token = None
if self._cursor == len(self._string):
return None
assert self._cursor < len(self._string)
if (self._cursor + 1 < len(self._string) and
self._string[self._cursor + 1] in '{}'):
self.next_token = (
_TOKENS.get(self._string[self._cursor:self._cursor+3]) or
_TOKENS.get(self._string[self._cursor:self._cursor+2]))
if self.next_token is None:
self.next_token = _Token.CHARACTER
self._cursor += len(self.next_token.text)
return self
def AdvanceOver(self, token, description=None):
parse_error = None
if not self.next_token:
parse_error = 'Reached EOF but expected %s' % token.name
elif self.next_token is not token:
parse_error = 'Expecting token %s but got %s at line %s' % (
token.name, self.next_token.name, self.next_line)
if parse_error:
if description is not None:
parse_error += ' %s' % description
raise ParseException(parse_error)
return self.Advance()
def AdvanceOverSeparator(self, char, description=None):
self.SkipWhitespace()
next_char = self.NextCharacter()
if next_char != char:
parse_error = 'Expected \'%s\', got \'%s\'' % (char, next_char)
if description is not None:
parse_error += ' (%s)' % description
raise ParseException(parse_error)
self.AdvanceOver(_Token.CHARACTER)
self.SkipWhitespace()
def AdvanceOverNextString(self, excluded=''):
start = self._cursor - len(self.next_token.text)
while (self.next_token is _Token.CHARACTER and
# Can use -1 here because token length of CHARACTER is 1.
self._string[self._cursor - 1] not in excluded):
self.Advance()
end = self._cursor - (len(self.next_token.text) if self.next_token else 0)
return self._string[start:end]
def AdvanceToNextWhitespace(self):
return self.AdvanceOverNextString(excluded=' \n\r\t')
def SkipWhitespace(self):
while (self.next_token is _Token.CHARACTER and
# Can use -1 here because token length of CHARACTER is 1.
self._string[self._cursor - 1] in ' \n\r\t'):
self.Advance()
def __repr__(self):
return '%s(next_token=%s, remainder=%s)' % (type(self).__name__,
self.next_token,
self._string[self._cursor:])
def __str__(self):
return repr(self)
class Motemplate(object):
'''A motemplate template.
'''
def __init__(self, template, name=None):
self.source = template
self._name = name
tokens = _TokenStream(template)
self._top_node = self._ParseSection(tokens)
if not self._top_node:
raise ParseException('Template is empty')
if tokens.HasNext():
raise ParseException('There are still tokens remaining at %s, '
'was there an end-section without a start-section?' %
tokens.next_line)
def _ParseSection(self, tokens):
nodes = []
while tokens.HasNext():
if tokens.next_token in (_Token.OPEN_END_SECTION,
_Token.OPEN_ELSE):
# Handled after running parseSection within the SECTION cases, so this
# is a terminating condition. If there *is* an orphaned
# OPEN_END_SECTION, it will be caught by noticing that there are
# leftover tokens after termination.
break
elif tokens.next_token in (_Token.CLOSE_MUSTACHE,
_Token.CLOSE_MUSTACHE3):
raise ParseException('Orphaned %s at line %s' % (tokens.next_token.name,
tokens.next_line))
nodes += self._ParseNextOpenToken(tokens)
for i, node in enumerate(nodes):
if isinstance(node, _StringNode):
continue
previous_node = nodes[i - 1] if i > 0 else None
next_node = nodes[i + 1] if i < len(nodes) - 1 else None
rendered_node = None
if node.GetStartLine() != node.GetEndLine():
rendered_node = _BlockNode(node)
if previous_node:
previous_node.TrimEndingSpaces()
if next_node:
next_node.TrimStartingNewLine()
elif ((not previous_node or previous_node.EndsWithEmptyLine()) and
(not next_node or next_node.StartsWithNewLine())):
indentation = 0
if previous_node:
indentation = previous_node.TrimEndingSpaces()
if next_node:
next_node.TrimStartingNewLine()
rendered_node = _IndentedNode(node, indentation)
else:
rendered_node = _InlineNode(node)
nodes[i] = rendered_node
if len(nodes) == 0:
return None
if len(nodes) == 1:
return nodes[0]
return _NodeCollection(nodes)
def _ParseNextOpenToken(self, tokens):
next_token = tokens.next_token
if next_token is _Token.CHARACTER:
# Plain strings.
start_line = tokens.next_line
string = tokens.AdvanceOverNextString()
return [_StringNode(string, start_line, tokens.next_line)]
elif next_token in (_Token.OPEN_VARIABLE,
_Token.OPEN_UNESCAPED_VARIABLE,
_Token.OPEN_JSON):
# Inline nodes that don't take arguments.
tokens.Advance()
close_token = (_Token.CLOSE_MUSTACHE3
if next_token is _Token.OPEN_UNESCAPED_VARIABLE else
_Token.CLOSE_MUSTACHE)
id_ = self._NextIdentifier(tokens)
tokens.AdvanceOver(close_token)
return [next_token.clazz(id_)]
elif next_token is _Token.OPEN_ASSERTION:
# Inline nodes that take arguments.
tokens.Advance()
id_ = self._NextIdentifier(tokens)
node = next_token.clazz(id_, tokens.AdvanceOverNextString())
tokens.AdvanceOver(_Token.CLOSE_MUSTACHE)
return [node]
elif next_token in (_Token.OPEN_PARTIAL,
_Token.OPEN_START_SECTION,
_Token.OPEN_START_VERTED_SECTION,
_Token.OPEN_START_INVERTED_SECTION):
# Block nodes, though they may have inline syntax like {{#foo bar /}}.
tokens.Advance()
bind_to, id_ = None, self._NextIdentifier(tokens)
if tokens.NextCharacter() == ':':
# This section has the format {{#bound:id}} as opposed to just {{id}}.
# That is, |id_| is actually the identifier to bind what the section
# is producing, not the identifier of where to find that content.
tokens.AdvanceOverSeparator(':')
bind_to, id_ = id_, self._NextIdentifier(tokens)
partial_args = None
if next_token is _Token.OPEN_PARTIAL:
partial_args = self._ParsePartialNodeArgs(tokens)
if tokens.next_token is not _Token.CLOSE_MUSTACHE:
# Inline syntax for partial types.
if bind_to is not None:
raise ParseException(
'Cannot bind %s to a self-closing partial' % bind_to)
tokens.AdvanceOver(_Token.INLINE_END_SECTION)
partial_node = _PartialNode.Inline(id_)
partial_node.SetArguments(partial_args)
return [partial_node]
elif tokens.next_token is not _Token.CLOSE_MUSTACHE:
# Inline syntax for non-partial types. Support select node types:
# variables, partials, JSON.
line, column = tokens.next_line, (tokens.next_column + 1)
name = tokens.AdvanceToNextWhitespace()
clazz = _UnescapedVariableNode
if name.startswith('*'):
clazz = _JsonNode
elif name.startswith('+'):
clazz = _PartialNode.Inline
if clazz is not _UnescapedVariableNode:
name = name[1:]
column += 1
inline_node = clazz(_Identifier(name, line, column))
if isinstance(inline_node, _PartialNode):
inline_node.SetArguments(self._ParsePartialNodeArgs(tokens))
if bind_to is not None:
inline_node.PassThroughArgument(bind_to)
tokens.SkipWhitespace()
tokens.AdvanceOver(_Token.INLINE_END_SECTION)
return [next_token.clazz(bind_to, id_, inline_node)]
# Block syntax.
tokens.AdvanceOver(_Token.CLOSE_MUSTACHE)
section = self._ParseSection(tokens)
else_node_class = next_token.ElseNodeClass() # may not have one
else_section = None
if (else_node_class is not None and
tokens.next_token is _Token.OPEN_ELSE):
self._OpenElse(tokens, id_)
else_section = self._ParseSection(tokens)
self._CloseSection(tokens, id_)
nodes = []
if section is not None:
node = next_token.clazz(bind_to, id_, section)
if partial_args:
node.SetArguments(partial_args)
nodes.append(node)
if else_section is not None:
nodes.append(else_node_class(bind_to, id_, else_section))
return nodes
elif next_token is _Token.OPEN_COMMENT:
# Comments.
start_line = tokens.next_line
self._AdvanceOverComment(tokens)
return [_CommentNode(start_line, tokens.next_line)]
def _AdvanceOverComment(self, tokens):
tokens.AdvanceOver(_Token.OPEN_COMMENT)
depth = 1
while tokens.HasNext() and depth > 0:
if tokens.next_token is _Token.OPEN_COMMENT:
depth += 1
elif tokens.next_token is _Token.CLOSE_COMMENT:
depth -= 1
tokens.Advance()
def _CloseSection(self, tokens, id_):
tokens.AdvanceOver(_Token.OPEN_END_SECTION,
description='to match %s' % id_.GetDescription())
next_string = tokens.AdvanceOverNextString()
if next_string != '' and next_string != id_.name:
raise ParseException(
'Start section %s doesn\'t match end %s' % (id_, next_string))
tokens.AdvanceOver(_Token.CLOSE_MUSTACHE)
def _OpenElse(self, tokens, id_):
tokens.AdvanceOver(_Token.OPEN_ELSE)
next_string = tokens.AdvanceOverNextString()
if next_string != '' and next_string != id_.name:
raise ParseException(
'Start section %s doesn\'t match else %s' % (id_, next_string))
tokens.AdvanceOver(_Token.CLOSE_MUSTACHE)
def _ParsePartialNodeArgs(self, tokens):
args = {}
tokens.SkipWhitespace()
while (tokens.next_token is _Token.CHARACTER and
tokens.NextCharacter() != ')'):
key = tokens.AdvanceOverNextString(excluded=':')
tokens.AdvanceOverSeparator(':')
if tokens.NextCharacter() == '(':
tokens.AdvanceOverSeparator('(')
inner_id = self._NextIdentifier(tokens)
inner_args = self._ParsePartialNodeArgs(tokens)
tokens.AdvanceOverSeparator(')')
args[key] = {inner_id: inner_args}
else:
args[key] = self._NextIdentifier(tokens)
return args or None
def _NextIdentifier(self, tokens):
tokens.SkipWhitespace()
column_start = tokens.next_column + 1
id_ = _Identifier(tokens.AdvanceOverNextString(excluded=' \n\r\t:()'),
tokens.next_line,
column_start)
tokens.SkipWhitespace()
return id_
def Render(self, *user_contexts):
'''Renders this template given a variable number of contexts to read out
values from (such as those appearing in {{foo}}).
'''
internal_context = _InternalContext()
contexts = list(user_contexts)
contexts.append({
'_': internal_context,
'false': False,
'true': True,
})
render_state = _RenderState(self._name or '<root>', _Contexts(contexts))
internal_context.SetRenderState(render_state)
self._top_node.Render(render_state)
return render_state.GetResult()
def render(self, *contexts):
return self.Render(*contexts)
def __eq__(self, other):
return self.source == other.source and self._name == other._name
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return str('%s(%s)' % (type(self).__name__, self._top_node))
def __str__(self):
return repr(self)
|
openfun/edx-platform
|
refs/heads/master
|
common/djangoapps/course_action_state/migrations/0002_add_rerun_display_name.py
|
129
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CourseRerunState.display_name'
db.add_column('course_action_state_coursererunstate', 'display_name',
self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'CourseRerunState.display_name'
db.delete_column('course_action_state_coursererunstate', 'display_name')
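# South's frozen ORM: a serialized snapshot of every model this migration
# touches (and their foreign-key targets), used to rebuild fake model
# classes at migration time, independent of the current models.py.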
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'course_action_state.coursererunstate': {
'Meta': {'unique_together': "(('course_key', 'action'),)", 'object_name': 'CourseRerunState'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'course_key': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_by_user+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'display_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'should_display': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source_course_key': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'updated_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'updated_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'updated_by_user+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"})
}
}
complete_apps = ['course_action_state']
|
SaschaMester/delicium
|
refs/heads/master
|
tools/memory_inspector/memory_inspector/classification/rules_unittest.py
|
109
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from memory_inspector.classification import rules
_TEST_RULE = """
[
{
'name': '1',
'mmap-file': r'/foo/1',
'foo': 'bar',
'children': [
{
'name': '1/1',
'mmap-file': r'/foo/1/1',
'children': []
},
{
'name': '1/2',
'mmap-file': r'/foo/1/2',
},
]
},
{
'name': '2',
'mmap-file': r'/bar/2',
'children': [
{
'name': '2/1',
'mmap-file': r'/bar/2/1',
},
{
'name': '2/2',
'mmap-file': r'/bar/2/2',
'children': [
{
'name': '2/2/1',
'mmap-file': r'/bar/2/2/1',
},
{
'name': '2/2/2',
'mmap-file': r'/bar/2/2/2',
},
]
},
{
'name': '2/3',
'mmap-file': r'/bar/3',
},
]
},
]
"""
class RulesTest(unittest.TestCase):
def runTest(self):
rt = rules.Load(_TEST_RULE, MockRule)
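# rules.Load is expected to wrap everything under a synthetic 'Total' root
# and append a catch-all '<name>-other' child at each level, so the child
# counts checked below are one more than the rules themselves define.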
self.assertEqual(rt.name, 'Total')
self.assertEqual(len(rt.children), 3)
node1 = rt.children[0]
node2 = rt.children[1]
node3 = rt.children[2]
# Check 1-st level leaves.
self.assertEqual(node1.name, '1')
self.assertEqual(node1.filters['mmap-file'], '/foo/1')
self.assertEqual(node1.filters['foo'], 'bar')
self.assertEqual(node2.name, '2')
self.assertEqual(node2.filters['mmap-file'], '/bar/2')
self.assertEqual(node3.name, 'Total-other')
# Check 2-nd level leaves and their children.
self.assertEqual(len(node1.children), 3)
self.assertEqual(node1.children[0].name, '1/1')
self.assertEqual(node1.children[1].name, '1/2')
self.assertEqual(node1.children[2].name, '1-other')
self.assertEqual(len(node2.children), 4)
self.assertEqual(node2.children[0].name, '2/1')
self.assertEqual(len(node2.children[0].children), 0)
self.assertEqual(node2.children[1].name, '2/2')
self.assertEqual(len(node2.children[1].children), 3)
self.assertEqual(node2.children[2].name, '2/3')
self.assertEqual(len(node2.children[2].children), 0)
self.assertEqual(node2.children[3].name, '2-other')
self.assertEqual(len(node2.children[3].children), 0)
class MockRule(rules.Rule):
def __init__(self, name, filters):
super(MockRule, self).__init__(name)
self.filters = filters
|
rjschwei/azure-sdk-for-python
|
refs/heads/master
|
azure-batch/azure/batch/models/exit_code_range_mapping.py
|
3
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ExitCodeRangeMapping(Model):
"""A range of exit codes and how the Batch service should respond to exit
codes within that range.
:param start: The first exit code in the range.
:type start: int
:param end: The last exit code in the range.
:type end: int
:param exit_options: How the Batch service should respond if the task
exits with an exit code in the range start to end (inclusive).
:type exit_options: :class:`ExitOptions <azure.batch.models.ExitOptions>`
"""
_validation = {
'start': {'required': True},
'end': {'required': True},
'exit_options': {'required': True},
}
_attribute_map = {
'start': {'key': 'start', 'type': 'int'},
'end': {'key': 'end', 'type': 'int'},
'exit_options': {'key': 'exitOptions', 'type': 'ExitOptions'},
}
def __init__(self, start, end, exit_options):
self.start = start
self.end = end
self.exit_options = exit_options
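# Illustrative usage (not from the generated file; assumes ExitOptions from
# this same models package):
#   mapping = ExitCodeRangeMapping(
#       start=1, end=3, exit_options=ExitOptions(job_action='terminate'))
# would map exit codes 1 through 3 (inclusive) to terminating the job.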
|
lbartoletti/QGIS
|
refs/heads/master
|
tests/src/python/test_qgsdelimitedtextprovider.py
|
10
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsDelimitedTextProvider.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Chris Crook'
__date__ = '20/04/2013'
__copyright__ = 'Copyright 2013, The QGIS Project'
# This module provides unit test for the delimited text provider. It uses data files in
# the testdata/delimitedtext directory.
#
# New tests can be created (or existing ones updated) by incorporating a createTest
# call into the test. This will load the file and generate a test that the features
# loaded from it are correct. It assumes that the data is correct at the time the
# test is created. The new test is written to the test output file, and can be edited into
# this module to implement the test.
#
# To recreate all tests, set the REBUILD_DELIMITED_TEXT_TESTS environment
# variable (which sets rebuildTests to True below).
import qgis # NOQA
import os
import re
import tempfile
import inspect
import time
import test_qgsdelimitedtextprovider_wanted as want # NOQA
from collections.abc import Callable
rebuildTests = 'REBUILD_DELIMITED_TEXT_TESTS' in os.environ
from qgis.PyQt.QtCore import QCoreApplication, QVariant, QUrl, QObject
from qgis.core import (
QgsProviderRegistry,
QgsVectorLayer,
QgsFeatureRequest,
QgsRectangle,
QgsApplication,
QgsFeature,
QgsWkbTypes,
QgsFeatureSource)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath, compareWkt, compareUrl
from providertestbase import ProviderTestCase
start_app()
TEST_DATA_DIR = unitTestDataPath()
geomkey = "#geometry"
fidkey = "#fid"
try:
# Qt 5
from qgis.PyQt.QtCore import QUrlQuery
class MyUrl:
def __init__(self, url):
self.url = url
self.query = QUrlQuery()
@classmethod
def fromLocalFile(cls, filename):
return cls(QUrl.fromLocalFile(filename))
def addQueryItem(self, k, v):
self.query.addQueryItem(k, v)
def toString(self):
urlstr = self.url.toString()
querystr = self.query.toString(QUrl.FullyDecoded)
if querystr != '':
urlstr += '?'
urlstr += querystr
return urlstr
except ImportError:
MyUrl = QUrl
def normalize_query_items_order(s):
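# Return the URL string with its query items sorted by key, so that URLs
# differing only in query-item order compare equal in the tests below.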
split_url = s.split('?')
urlstr = split_url[0]
if len(split_url) == 2:
items_list = split_url[1].split('&')
items_map = {}
for item in items_list:
split_item = item.split('=')
items_map[split_item[0]] = split_item[1]
first_arg = True
for k in sorted(items_map.keys()):
if first_arg:
urlstr += '?'
first_arg = False
else:
urlstr += '&'
urlstr += k + '=' + items_map[k]
return urlstr
# Thought we could connect to the messageReceived signal, but it doesn't
# seem to be available in Python :-( Not sure why?
class MessageLogger(QObject):
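# Context manager that records QGIS message-log output while the `with`
# block is active, optionally filtered to a single tag.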
def __init__(self, tag=None):
QObject.__init__(self)
self.log = []
self.tag = tag
def __enter__(self):
QgsApplication.messageLog().messageReceived.connect(self.logMessage)
return self
def __exit__(self, type, value, traceback):
QgsApplication.messageLog().messageReceived.disconnect(self.logMessage)
def logMessage(self, msg, tag, level):
if tag == self.tag or not self.tag:
self.log.append(str(msg))
def messages(self):
return self.log
class TestQgsDelimitedTextProviderXY(unittest.TestCase, ProviderTestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
# Create test layer
srcpath = os.path.join(TEST_DATA_DIR, 'provider')
cls.basetestfile = os.path.join(srcpath, 'delimited_xy.csv')
url = MyUrl.fromLocalFile(cls.basetestfile)
url.addQueryItem("crs", "epsg:4326")
url.addQueryItem("type", "csv")
url.addQueryItem("xField", "X")
url.addQueryItem("yField", "Y")
url.addQueryItem("spatialIndex", "no")
url.addQueryItem("subsetIndex", "no")
url.addQueryItem("watchFile", "no")
cls.vl = QgsVectorLayer(url.toString(), 'test', 'delimitedtext')
assert cls.vl.isValid(), "{} is invalid".format(cls.basetestfile)
cls.source = cls.vl.dataProvider()
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
def treat_time_as_string(self):
return False
def treat_date_as_string(self):
return False
def treat_datetime_as_string(self):
return False
class TestQgsDelimitedTextProviderWKT(unittest.TestCase, ProviderTestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
# Create test layer
srcpath = os.path.join(TEST_DATA_DIR, 'provider')
cls.basetestfile = os.path.join(srcpath, 'delimited_wkt.csv')
url = MyUrl.fromLocalFile(cls.basetestfile)
url.addQueryItem("crs", "epsg:4326")
url.addQueryItem("type", "csv")
url.addQueryItem("wktField", "wkt")
url.addQueryItem("spatialIndex", "no")
url.addQueryItem("subsetIndex", "no")
url.addQueryItem("watchFile", "no")
cls.vl = QgsVectorLayer(url.toString(), 'test', 'delimitedtext')
assert cls.vl.isValid(), "{} is invalid".format(cls.basetestfile)
cls.source = cls.vl.dataProvider()
cls.basetestpolyfile = os.path.join(srcpath, 'delimited_wkt_poly.csv')
url = MyUrl.fromLocalFile(cls.basetestpolyfile)
url.addQueryItem("crs", "epsg:4326")
url.addQueryItem("type", "csv")
url.addQueryItem("wktField", "wkt")
url.addQueryItem("spatialIndex", "no")
url.addQueryItem("subsetIndex", "no")
url.addQueryItem("watchFile", "no")
cls.vl_poly = QgsVectorLayer(url.toString(), 'test_polygon', 'delimitedtext')
assert cls.vl_poly.isValid(), "{} is invalid".format(cls.basetestpolyfile)
cls.poly_provider = cls.vl_poly.dataProvider()
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
def treat_time_as_string(self):
return False
def treat_date_as_string(self):
return False
def treat_datetime_as_string(self):
return False
class TestQgsDelimitedTextProviderOther(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
# toggle full ctest output to debug flaky CI test
print('CTEST_FULL_OUTPUT')
def layerData(self, layer, request=None, offset=0):
# Retrieve the data for a layer
first = True
data = {}
fields = []
fieldTypes = []
fr = QgsFeatureRequest()
if request:
if 'exact' in request and request['exact']:
fr.setFlags(QgsFeatureRequest.ExactIntersect)
if 'nogeom' in request and request['nogeom']:
fr.setFlags(QgsFeatureRequest.NoGeometry)
if 'fid' in request:
fr.setFilterFid(request['fid'])
elif 'extents' in request:
fr.setFilterRect(QgsRectangle(*request['extents']))
if 'attributes' in request:
fr.setSubsetOfAttributes(request['attributes'])
# IMPORTANT - we do not use `for f in layer.getFeatures(fr):` as we need
# to verify that existing attributes and geometry are correctly cleared
# from the feature when calling nextFeature()
it = layer.getFeatures(fr)
f = QgsFeature()
while it.nextFeature(f):
if first:
first = False
for field in f.fields():
fields.append(str(field.name()))
fieldTypes.append(str(field.typeName()))
fielddata = dict((name, str(f[name])) for name in fields)
g = f.geometry()
if not g.isNull():
fielddata[geomkey] = str(g.asWkt())
else:
fielddata[geomkey] = "None"
fielddata[fidkey] = f.id()
id = fielddata[fields[0]]
description = fielddata[fields[1]]
fielddata['id'] = id
fielddata['description'] = description
data[f.id() + offset] = fielddata
if 'id' not in fields:
fields.insert(0, 'id')
if 'description' not in fields:
fields.insert(1, 'description')
fields.append(fidkey)
fields.append(geomkey)
return fields, fieldTypes, data
def delimitedTextData(self, testname, filename, requests, verbose, **params):
# Retrieve the data for a delimited text url
# Create a layer for the specified file and query parameters
# and return the data for the layer (fields, data)
filepath = os.path.join(unitTestDataPath("delimitedtext"), filename)
url = MyUrl.fromLocalFile(filepath)
if not requests:
requests = [{}]
for k in list(params.keys()):
url.addQueryItem(k, params[k])
urlstr = url.toString()
log = []
with MessageLogger('DelimitedText') as logger:
if verbose:
print(testname)
layer = QgsVectorLayer(urlstr, 'test', 'delimitedtext')
# decodeUri / encodeUri check
self.assertTrue(compareUrl(layer.source(), QgsProviderRegistry.instance().encodeUri('delimitedtext', QgsProviderRegistry.instance().decodeUri('delimitedtext', layer.source()))))
uri = layer.dataProvider().dataSourceUri()
if verbose:
print(uri)
basename = os.path.basename(filepath)
if not basename.startswith('test'):
basename = 'file'
        uri = re.sub(r'^file://[^?]*', 'file://' + basename, uri)
fields = []
fieldTypes = []
data = {}
if layer.isValid():
for nr, r in enumerate(requests):
if verbose:
print(("Processing request", nr + 1, repr(r)))
if isinstance(r, Callable):
r(layer)
if verbose:
print("Request function executed")
if isinstance(r, Callable):
continue
rfields, rtypes, rdata = self.layerData(layer, r, nr * 1000)
if len(rfields) > len(fields):
fields = rfields
fieldTypes = rtypes
data.update(rdata)
if not rdata:
log.append("Request " + str(nr) + " did not return any data")
if verbose:
print(("Request returned", len(list(rdata.keys())), "features"))
for msg in logger.messages():
filelogname = 'temp_file' if 'tmp' in filename.lower() else filename
msg = re.sub(r'file\s+.*' + re.escape(filename), 'file ' + filelogname, msg)
msg = msg.replace(filepath, filelogname)
log.append(msg)
return dict(fields=fields, fieldTypes=fieldTypes, data=data, log=log, uri=uri, geometryType=layer.geometryType())
def printWanted(self, testname, result):
# Routine to export the result as a function definition
print()
print(("def {0}():".format(testname)))
data = result['data']
log = result['log']
fields = result['fields']
prefix = ' '
# Dump the data for a layer - used to construct unit tests
print((prefix + "wanted={}"))
print((prefix + "wanted['uri']=" + repr(result['uri'])))
print((prefix + "wanted['fieldTypes']=" + repr(result['fieldTypes'])))
print((prefix + "wanted['geometryType']=" + repr(result['geometryType'])))
print((prefix + "wanted['data']={"))
for k in sorted(data.keys()):
row = data[k]
print((prefix + " {0}: {{".format(repr(k))))
for f in fields:
print((prefix + " " + repr(f) + ": " + repr(row[f]) + ","))
print((prefix + " },"))
print((prefix + " }"))
print((prefix + "wanted['log']=["))
for msg in log:
print((prefix + ' ' + repr(msg) + ','))
print((prefix + ' ]'))
print(' return wanted')
print('', flush=True)
def recordDifference(self, record1, record2):
# Compare a record defined as a dictionary
for k in list(record1.keys()):
if k not in record2:
return "Field {0} is missing".format(k)
r1k = record1[k]
r2k = record2[k]
if k == geomkey:
if not compareWkt(r1k, r2k):
return "Geometry differs: {0:.50} versus {1:.50}".format(r1k, r2k)
else:
if record1[k] != record2[k]:
return "Field {0} differs: {1:.50} versus {2:.50}".format(k, repr(r1k), repr(r2k))
for k in list(record2.keys()):
if k not in record1:
return "Output contains extra field {0}".format(k)
return ''
def runTest(self, file, requests, **params):
testname = inspect.stack()[1][3]
verbose = not rebuildTests
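        # In rebuild mode the expected results are regenerated: printWanted()
        # emits a ready-to-paste want.<testname>() function and the test is
        # failed on purpose so a rebuild run cannot be mistaken for a pass.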
if verbose:
print(("Running test:", testname))
result = self.delimitedTextData(testname, file, requests, verbose, **params)
if rebuildTests:
self.printWanted(testname, result)
assert False, "Test not run - being rebuilt"
        try:
            wanted = getattr(want, testname)()
        except AttributeError:
            self.printWanted(testname, result)
            assert False, "Test results not available for {0}".format(testname)
data = result['data']
log = result['log']
failures = []
if normalize_query_items_order(result['uri']) != normalize_query_items_order(wanted['uri']):
msg = "Layer Uri ({0}) doesn't match expected ({1})".format(
normalize_query_items_order(result['uri']), normalize_query_items_order(wanted['uri']))
print((' ' + msg))
failures.append(msg)
if result['fieldTypes'] != wanted['fieldTypes']:
msg = "Layer field types ({0}) doesn't match expected ({1})".format(
result['fieldTypes'], wanted['fieldTypes'])
failures.append(msg)
if result['geometryType'] != wanted['geometryType']:
msg = "Layer geometry type ({0}) doesn't match expected ({1})".format(
result['geometryType'], wanted['geometryType'])
failures.append(msg)
wanted_data = wanted['data']
for id in sorted(wanted_data.keys()):
print('getting wanted data')
wrec = wanted_data[id]
print('getting received data')
trec = data.get(id, {})
print('getting description')
description = wrec['description']
print('getting difference')
difference = self.recordDifference(wrec, trec)
if not difference:
print((' {0}: Passed'.format(description)))
else:
print((' {0}: {1}'.format(description, difference)))
failures.append(description + ': ' + difference)
for id in sorted(data.keys()):
if id not in wanted_data:
msg = "Layer contains unexpected extra data with id: \"{0}\"".format(id)
print((' ' + msg))
failures.append(msg)
common = []
log_wanted = wanted['log']
for l in log:
if l in log_wanted:
common.append(l)
for l in log_wanted:
if l not in common:
msg = 'Missing log message: ' + l
print((' ' + msg))
failures.append(msg)
for l in log:
if l not in common:
msg = 'Extra log message: ' + l
print((' ' + msg))
failures.append(msg)
if len(log) == len(common) and len(log_wanted) == len(common):
print(' Message log correct: Passed')
if failures:
self.printWanted(testname, result)
assert len(failures) == 0, "\n".join(failures)
def test_001_provider_defined(self):
registry = QgsProviderRegistry.instance()
metadata = registry.providerMetadata('delimitedtext')
assert metadata is not None, "Delimited text provider is not installed"
def test_002_load_csv_file(self):
# CSV file parsing
filename = 'test.csv'
params = {'geomType': 'none', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_003_field_naming(self):
# Management of missing/duplicate/invalid field names
filename = 'testfields.csv'
params = {'geomType': 'none', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_004_max_fields(self):
# Limiting maximum number of fields
filename = 'testfields.csv'
params = {'geomType': 'none', 'maxFields': '7', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_005_load_whitespace(self):
# Whitespace file parsing
filename = 'test.space'
params = {'geomType': 'none', 'type': 'whitespace'}
requests = None
self.runTest(filename, requests, **params)
def test_006_quote_escape(self):
# Quote and escape file parsing
filename = 'test.pipe'
params = {'geomType': 'none', 'quote': '"', 'delimiter': '|', 'escape': '\\'}
requests = None
self.runTest(filename, requests, **params)
def test_007_multiple_quote(self):
# Multiple quote and escape characters
filename = 'test.quote'
params = {'geomType': 'none', 'quote': '\'"', 'type': 'csv', 'escape': '"\''}
requests = None
self.runTest(filename, requests, **params)
def test_008_badly_formed_quotes(self):
# Badly formed quoted fields
filename = 'test.badquote'
params = {'geomType': 'none', 'quote': '"', 'type': 'csv', 'escape': '"'}
requests = None
self.runTest(filename, requests, **params)
def test_009_skip_lines(self):
# Skip lines
filename = 'test2.csv'
params = {'geomType': 'none', 'useHeader': 'no', 'type': 'csv', 'skipLines': '2'}
requests = None
self.runTest(filename, requests, **params)
def test_010_read_coordinates(self):
        # Reading X/Y point coordinates
filename = 'testpt.csv'
params = {'yField': 'geom_y', 'xField': 'geom_x', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_011_read_wkt(self):
# Reading WKT geometry field
filename = 'testwkt.csv'
params = {'delimiter': '|', 'type': 'csv', 'wktField': 'geom_wkt'}
requests = None
self.runTest(filename, requests, **params)
def test_012_read_wkt_point(self):
# Read WKT points
filename = 'testwkt.csv'
params = {'geomType': 'point', 'delimiter': '|', 'type': 'csv', 'wktField': 'geom_wkt'}
requests = None
self.runTest(filename, requests, **params)
def test_013_read_wkt_line(self):
# Read WKT linestrings
filename = 'testwkt.csv'
params = {'geomType': 'line', 'delimiter': '|', 'type': 'csv', 'wktField': 'geom_wkt'}
requests = None
self.runTest(filename, requests, **params)
def test_014_read_wkt_polygon(self):
# Read WKT polygons
filename = 'testwkt.csv'
params = {'geomType': 'polygon', 'delimiter': '|', 'type': 'csv', 'wktField': 'geom_wkt'}
requests = None
self.runTest(filename, requests, **params)
def test_015_read_dms_xy(self):
# Reading degrees/minutes/seconds angles
filename = 'testdms.csv'
params = {'yField': 'lat', 'xField': 'lon', 'type': 'csv', 'xyDms': 'yes'}
requests = None
self.runTest(filename, requests, **params)
def test_016_decimal_point(self):
        # Reading coordinates with comma as the decimal point
filename = 'testdp.csv'
params = {'yField': 'geom_y', 'xField': 'geom_x', 'type': 'csv', 'delimiter': ';', 'decimalPoint': ','}
requests = None
self.runTest(filename, requests, **params)
def test_017_regular_expression_1(self):
# Parsing regular expression delimiter
filename = 'testre.txt'
params = {'geomType': 'none', 'trimFields': 'Y', 'delimiter': 'RE(?:GEXP)?', 'type': 'regexp'}
requests = None
self.runTest(filename, requests, **params)
def test_018_regular_expression_2(self):
# Parsing regular expression delimiter with capture groups
filename = 'testre.txt'
params = {'geomType': 'none', 'trimFields': 'Y', 'delimiter': '(RE)(GEXP)?', 'type': 'regexp'}
requests = None
self.runTest(filename, requests, **params)
def test_019_regular_expression_3(self):
# Parsing anchored regular expression
filename = 'testre2.txt'
params = {'geomType': 'none', 'trimFields': 'Y', 'delimiter': '^(.{5})(.{30})(.{5,})', 'type': 'regexp'}
requests = None
self.runTest(filename, requests, **params)
def test_020_regular_expression_4(self):
# Parsing zero length re
filename = 'testre3.txt'
params = {'geomType': 'none', 'delimiter': 'x?', 'type': 'regexp'}
requests = None
self.runTest(filename, requests, **params)
def test_021_regular_expression_5(self):
# Parsing zero length re 2
filename = 'testre3.txt'
params = {'geomType': 'none', 'delimiter': '\\b', 'type': 'regexp'}
requests = None
self.runTest(filename, requests, **params)
def test_022_utf8_encoded_file(self):
# UTF8 encoded file test
filename = 'testutf8.csv'
params = {'geomType': 'none', 'delimiter': '|', 'type': 'csv', 'encoding': 'utf-8'}
requests = None
self.runTest(filename, requests, **params)
def test_023_latin1_encoded_file(self):
# Latin1 encoded file test
filename = 'testlatin1.csv'
params = {'geomType': 'none', 'delimiter': '|', 'type': 'csv', 'encoding': 'latin1'}
requests = None
self.runTest(filename, requests, **params)
def test_024_filter_rect_xy(self):
# Filter extents on XY layer
filename = 'testextpt.txt'
params = {'yField': 'y', 'delimiter': '|', 'type': 'csv', 'xField': 'x'}
requests = [
{'extents': [10, 30, 30, 50]},
{'extents': [10, 30, 30, 50], 'exact': 1},
{'extents': [110, 130, 130, 150]}]
self.runTest(filename, requests, **params)
def test_025_filter_rect_wkt(self):
# Filter extents on WKT layer
filename = 'testextw.txt'
params = {'delimiter': '|', 'type': 'csv', 'wktField': 'wkt'}
requests = [
{'extents': [10, 30, 30, 50]},
{'extents': [10, 30, 30, 50], 'exact': 1},
{'extents': [110, 130, 130, 150]}]
self.runTest(filename, requests, **params)
def test_026_filter_fid(self):
# Filter on feature id
filename = 'test.csv'
params = {'geomType': 'none', 'type': 'csv'}
requests = [
{'fid': 3},
{'fid': 9},
{'fid': 20},
{'fid': 3}]
self.runTest(filename, requests, **params)
def test_027_filter_attributes(self):
# Filter on attributes
filename = 'test.csv'
params = {'geomType': 'none', 'type': 'csv'}
requests = [
{'attributes': [1, 3]},
{'fid': 9},
{'attributes': [1, 3], 'fid': 9},
{'attributes': [3, 1], 'fid': 9},
{'attributes': [1, 3, 7], 'fid': 9},
{'attributes': [], 'fid': 9}]
self.runTest(filename, requests, **params)
def test_028_substring_test(self):
        # Subset string filtering (odd ids only)
filename = 'test.csv'
params = {'geomType': 'none', 'subset': 'id % 2 = 1', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_029_file_watcher(self):
# Testing file watcher
(filehandle, filename) = tempfile.mkstemp()
if os.name == "nt":
filename = filename.replace("\\", "/")
with os.fdopen(filehandle, "w") as f:
f.write("id,name\n1,rabbit\n2,pooh\n")
def appendfile(layer):
with open(filename, 'a') as f:
f.write('3,tiger\n')
# print "Appended to file - sleeping"
time.sleep(1)
QCoreApplication.instance().processEvents()
def rewritefile(layer):
with open(filename, 'w') as f:
f.write("name,size,id\ntoad,small,5\nmole,medium,6\nbadger,big,7\n")
# print "Rewritten file - sleeping"
time.sleep(1)
QCoreApplication.instance().processEvents()
def deletefile(layer):
try:
os.remove(filename)
            except OSError:
open(filename, "w").close()
assert os.path.getsize(filename) == 0, "removal and truncation of {} failed".format(filename)
# print "Deleted file - sleeping"
time.sleep(1)
QCoreApplication.instance().processEvents()
params = {'geomType': 'none', 'type': 'csv', 'watchFile': 'yes'}
requests = [
{'fid': 3},
{},
{'fid': 7},
appendfile,
{'fid': 3},
{'fid': 4},
{},
{'fid': 7},
rewritefile,
{'fid': 2},
{},
{'fid': 7},
deletefile,
{'fid': 2},
{},
rewritefile,
{'fid': 2},
]
self.runTest(filename, requests, **params)
def test_030_filter_rect_xy_spatial_index(self):
# Filter extents on XY layer with spatial index
filename = 'testextpt.txt'
params = {'yField': 'y', 'delimiter': '|', 'type': 'csv', 'xField': 'x', 'spatialIndex': 'Y'}
requests = [
{'extents': [10, 30, 30, 50]},
{'extents': [10, 30, 30, 50], 'exact': 1},
{'extents': [110, 130, 130, 150]},
{},
{'extents': [-1000, -1000, 1000, 1000]}
]
self.runTest(filename, requests, **params)
def test_031_filter_rect_wkt_spatial_index(self):
# Filter extents on WKT layer with spatial index
filename = 'testextw.txt'
params = {'delimiter': '|', 'type': 'csv', 'wktField': 'wkt', 'spatialIndex': 'Y'}
requests = [
{'extents': [10, 30, 30, 50]},
{'extents': [10, 30, 30, 50], 'exact': 1},
{'extents': [110, 130, 130, 150]},
{},
{'extents': [-1000, -1000, 1000, 1000]}
]
self.runTest(filename, requests, **params)
def test_032_filter_rect_wkt_create_spatial_index(self):
# Filter extents on WKT layer building spatial index
filename = 'testextw.txt'
params = {'delimiter': '|', 'type': 'csv', 'wktField': 'wkt'}
requests = [
{'extents': [10, 30, 30, 50]},
{},
lambda layer: layer.dataProvider().createSpatialIndex(),
{'extents': [10, 30, 30, 50]},
{'extents': [10, 30, 30, 50], 'exact': 1},
{'extents': [110, 130, 130, 150]},
{},
{'extents': [-1000, -1000, 1000, 1000]}
]
self.runTest(filename, requests, **params)
def test_033_reset_subset_string(self):
        # Resetting and re-applying the subset string
filename = 'test.csv'
params = {'geomType': 'none', 'type': 'csv'}
requests = [
{},
lambda layer: layer.dataProvider().setSubsetString("id % 2 = 1", True),
{},
lambda layer: layer.dataProvider().setSubsetString("id = 6", False),
{},
lambda layer: layer.dataProvider().setSubsetString("id = 3", False),
{},
lambda layer: layer.dataProvider().setSubsetString("id % 2 = 1", True),
{},
lambda layer: layer.dataProvider().setSubsetString("id % 2 = 0", True),
{},
]
self.runTest(filename, requests, **params)
def test_034_csvt_file(self):
# CSVT field types
filename = 'testcsvt.csv'
params = {'geomType': 'none', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_035_csvt_file2(self):
# CSV field types 2
filename = 'testcsvt2.txt'
params = {'geomType': 'none', 'type': 'csv', 'delimiter': '|'}
requests = None
self.runTest(filename, requests, **params)
def test_036_csvt_file_invalid_types(self):
# CSV field types invalid string format
filename = 'testcsvt3.csv'
params = {'geomType': 'none', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_037_csvt_file_invalid_file(self):
# CSV field types invalid file
filename = 'testcsvt4.csv'
params = {'geomType': 'none', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_038_type_inference(self):
        # Automatic field type detection
filename = 'testtypes.csv'
params = {'yField': 'lat', 'xField': 'lon', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_039_issue_13749(self):
# First record contains missing geometry
filename = 'test13749.csv'
params = {'yField': 'geom_y', 'xField': 'geom_x', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_040_issue_14666(self):
# x/y containing some null geometries
filename = 'test14666.csv'
params = {'yField': 'y', 'xField': 'x', 'type': 'csv', 'delimiter': '\\t'}
requests = None
self.runTest(filename, requests, **params)
def test_041_no_detect_type(self):
        # Field type detection disabled (detectTypes=no)
filename = 'testtypes.csv'
params = {'yField': 'lat', 'xField': 'lon', 'type': 'csv', 'detectTypes': 'no'}
requests = None
self.runTest(filename, requests, **params)
def test_042_no_detect_types_csvt(self):
# CSVT field types
filename = 'testcsvt.csv'
params = {'geomType': 'none', 'type': 'csv', 'detectTypes': 'no'}
requests = None
self.runTest(filename, requests, **params)
def test_043_decodeuri(self):
# URI decoding
filename = '/home/to/path/test.csv'
uri = 'file://{}?geomType=none'.format(filename)
registry = QgsProviderRegistry.instance()
components = registry.decodeUri('delimitedtext', uri)
self.assertEqual(components['path'], filename)
def test_044_ZM(self):
# Create test layer
srcpath = os.path.join(TEST_DATA_DIR, 'provider')
basetestfile = os.path.join(srcpath, 'delimited_xyzm.csv')
url = MyUrl.fromLocalFile(basetestfile)
url.addQueryItem("crs", "epsg:4326")
url.addQueryItem("type", "csv")
url.addQueryItem("xField", "X")
url.addQueryItem("yField", "Y")
url.addQueryItem("zField", "Z")
url.addQueryItem("mField", "M")
url.addQueryItem("spatialIndex", "no")
url.addQueryItem("subsetIndex", "no")
url.addQueryItem("watchFile", "no")
vl = QgsVectorLayer(url.toString(), 'test', 'delimitedtext')
assert vl.isValid(), "{} is invalid".format(basetestfile)
assert vl.wkbType() == QgsWkbTypes.PointZM, "wrong wkb type, should be PointZM"
assert vl.getFeature(2).geometry().asWkt() == "PointZM (-71.12300000000000466 78.23000000000000398 1 2)", "wrong PointZM geometry"
def test_045_Z(self):
# Create test layer
srcpath = os.path.join(TEST_DATA_DIR, 'provider')
basetestfile = os.path.join(srcpath, 'delimited_xyzm.csv')
url = MyUrl.fromLocalFile(basetestfile)
url.addQueryItem("crs", "epsg:4326")
url.addQueryItem("type", "csv")
url.addQueryItem("xField", "X")
url.addQueryItem("yField", "Y")
url.addQueryItem("zField", "Z")
url.addQueryItem("spatialIndex", "no")
url.addQueryItem("subsetIndex", "no")
url.addQueryItem("watchFile", "no")
vl = QgsVectorLayer(url.toString(), 'test', 'delimitedtext')
assert vl.isValid(), "{} is invalid".format(basetestfile)
assert vl.wkbType() == QgsWkbTypes.PointZ, "wrong wkb type, should be PointZ"
assert vl.getFeature(2).geometry().asWkt() == "PointZ (-71.12300000000000466 78.23000000000000398 1)", "wrong PointZ geometry"
def test_046_M(self):
# Create test layer
srcpath = os.path.join(TEST_DATA_DIR, 'provider')
basetestfile = os.path.join(srcpath, 'delimited_xyzm.csv')
url = MyUrl.fromLocalFile(basetestfile)
url.addQueryItem("crs", "epsg:4326")
url.addQueryItem("type", "csv")
url.addQueryItem("xField", "X")
url.addQueryItem("yField", "Y")
url.addQueryItem("mField", "M")
url.addQueryItem("spatialIndex", "no")
url.addQueryItem("subsetIndex", "no")
url.addQueryItem("watchFile", "no")
vl = QgsVectorLayer(url.toString(), 'test', 'delimitedtext')
assert vl.isValid(), "{} is invalid".format(basetestfile)
assert vl.wkbType() == QgsWkbTypes.PointM, "wrong wkb type, should be PointM"
assert vl.getFeature(2).geometry().asWkt() == "PointM (-71.12300000000000466 78.23000000000000398 2)", "wrong PointM geometry"
def test_047_datetime(self):
# Create test layer
srcpath = os.path.join(TEST_DATA_DIR, 'provider')
basetestfile = os.path.join(srcpath, 'delimited_datetime.csv')
url = MyUrl.fromLocalFile(basetestfile)
url.addQueryItem("crs", "epsg:4326")
url.addQueryItem("type", "csv")
url.addQueryItem("xField", "X")
url.addQueryItem("yField", "Y")
url.addQueryItem("spatialIndex", "no")
url.addQueryItem("subsetIndex", "no")
url.addQueryItem("watchFile", "no")
vl = QgsVectorLayer(url.toString(), 'test', 'delimitedtext')
assert vl.isValid(), "{} is invalid".format(basetestfile)
assert vl.fields().at(4).type() == QVariant.DateTime
assert vl.fields().at(5).type() == QVariant.Date
assert vl.fields().at(6).type() == QVariant.Time
assert vl.fields().at(9).type() == QVariant.String
def testSpatialIndex(self):
srcpath = os.path.join(TEST_DATA_DIR, 'provider')
basetestfile = os.path.join(srcpath, 'delimited_xyzm.csv')
url = MyUrl.fromLocalFile(basetestfile)
url.addQueryItem("crs", "epsg:4326")
url.addQueryItem("type", "csv")
url.addQueryItem("xField", "X")
url.addQueryItem("yField", "Y")
url.addQueryItem("spatialIndex", "no")
vl = QgsVectorLayer(url.toString(), 'test', 'delimitedtext')
self.assertTrue(vl.isValid())
self.assertEqual(vl.hasSpatialIndex(), QgsFeatureSource.SpatialIndexNotPresent)
vl.dataProvider().createSpatialIndex()
self.assertEqual(vl.hasSpatialIndex(), QgsFeatureSource.SpatialIndexPresent)
def testEncodeuri(self):
        # URI encoding
filename = '/home/to/path/test.csv'
registry = QgsProviderRegistry.instance()
parts = {'path': filename}
uri = registry.encodeUri('delimitedtext', parts)
self.assertEqual(uri, 'file://' + filename)
def testCREndOfLineAndWorkingBuffer(self):
# Test CSV file with \r (CR) endings
# Test also that the logic to refill the buffer works properly
os.environ['QGIS_DELIMITED_TEXT_FILE_BUFFER_SIZE'] = '17'
try:
basetestfile = os.path.join(unitTestDataPath("delimitedtext"), 'test_cr_end_of_line.csv')
url = MyUrl.fromLocalFile(basetestfile)
url.addQueryItem("type", "csv")
url.addQueryItem("geomType", "none")
vl = QgsVectorLayer(url.toString(), 'test', 'delimitedtext')
assert vl.isValid(), "{} is invalid".format(basetestfile)
fields = vl.fields()
self.assertEqual(len(fields), 2)
self.assertEqual(fields[0].name(), 'col0')
self.assertEqual(fields[1].name(), 'col1')
features = [f for f in vl.getFeatures()]
self.assertEqual(len(features), 2)
self.assertEqual(features[0]['col0'], 'value00')
self.assertEqual(features[0]['col1'], 'value01')
self.assertEqual(features[1]['col0'], 'value10')
self.assertEqual(features[1]['col1'], 'value11')
finally:
del os.environ['QGIS_DELIMITED_TEXT_FILE_BUFFER_SIZE']
def testSaturationOfWorkingBuffer(self):
# 10 bytes is sufficient to detect the header line, but not enough for the
# first record
os.environ['QGIS_DELIMITED_TEXT_FILE_BUFFER_SIZE'] = '10'
try:
basetestfile = os.path.join(unitTestDataPath("delimitedtext"), 'test_cr_end_of_line.csv')
url = MyUrl.fromLocalFile(basetestfile)
url.addQueryItem("type", "csv")
url.addQueryItem("geomType", "none")
vl = QgsVectorLayer(url.toString(), 'test', 'delimitedtext')
assert vl.isValid(), "{} is invalid".format(basetestfile)
fields = vl.fields()
self.assertEqual(len(fields), 2)
self.assertEqual(fields[0].name(), 'col0')
self.assertEqual(fields[1].name(), 'col1')
features = [f for f in vl.getFeatures()]
self.assertEqual(len(features), 1)
self.assertEqual(features[0]['col0'], 'value00')
self.assertEqual(features[0]['col1'], 'va') # truncated
finally:
del os.environ['QGIS_DELIMITED_TEXT_FILE_BUFFER_SIZE']
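# Hypothetical helper (not used by the suite above): wraps the try/finally
# pattern of the two buffer tests so the working-buffer override is always
# restored even when the body raises. The function name is an assumption.
from contextlib import contextmanager

@contextmanager
def delimited_text_buffer_size(nbytes):
    os.environ['QGIS_DELIMITED_TEXT_FILE_BUFFER_SIZE'] = str(nbytes)
    try:
        yield
    finally:
        del os.environ['QGIS_DELIMITED_TEXT_FILE_BUFFER_SIZE']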
if __name__ == '__main__':
unittest.main()
|
DChaushev/the-last-stand
|
refs/heads/master
|
usrinput.py
|
1
|
# input lib
from pygame.locals import *
import pygame, string
class ConfigError(KeyError): pass
class Config:
""" A utility for configuration """
def __init__(self, options, *look_for):
        assertions = []
        for key in look_for:
            # Take the supplied option when present; otherwise evaluate the
            # default expression (e.g. 'pygame.font.Font(None, 32)').
            if key[0] in options:
                setattr(self, key[0], options[key[0]])
            else:
                setattr(self, key[0], eval(key[1]))
            assertions.append(key[0])
        for key in options:
            if key not in assertions:
                raise ConfigError(key + ' not expected as option')
class Input:
""" A text input for pygame apps """
def __init__(self, **options):
""" Options: x, y, font, color, restricted, maxlength, prompt """
self.options = Config(options, ['x', '0'], ['y', '0'], ['font', 'pygame.font.Font(None, 32)'],
['color', '(0,0,0)'], ['restricted', '\'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!"#$%&\\\'()*+,-./:;<=>?@[\]^_`{|}~\''],
['maxlength', '-1'], ['prompt', '\'\''])
self.x = self.options.x; self.y = self.options.y
self.font = self.options.font
self.color = self.options.color
self.restricted = self.options.restricted
self.maxlength = self.options.maxlength
self.prompt = self.options.prompt; self.value = ''
self.shifted = False
self.pause = 0
def set_pos(self, x, y):
""" Set the position to x, y """
self.x = x
self.y = y
def set_font(self, font):
""" Set the font for the input """
self.font = font
def draw(self, surface):
""" Draw the text input to a surface """
text = self.font.render(self.prompt+self.value, 1, self.color)
surface.blit(text, (self.x, self.y))
def update(self, events):
""" Update the input based on passed events """
pressed = pygame.key.get_pressed()
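        # Simple key-repeat for backspace: while the key is held, one
        # character is deleted on every fourth update() call (pause cycles
        # through 0..3).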
if self.pause == 3 and pressed[K_BACKSPACE]:
self.pause = 0
self.value = self.value[:-1]
elif pressed[K_BACKSPACE]:
self.pause += 1
else:
self.pause = 0
for event in events:
if event.type == KEYUP:
if event.key == K_LSHIFT or event.key == K_RSHIFT: self.shifted = False
if event.type == KEYDOWN:
if event.key == K_BACKSPACE: self.value = self.value[:-1]
elif event.key == K_LSHIFT or event.key == K_RSHIFT: self.shifted = True
# elif event.key == K_SPACE: self.value += ' '
elif event.key == K_KP_ENTER: return self.value
                # Mapping tables replace the original per-key elif chains. For
                # these printable keys pygame's K_* constants equal the ASCII
                # code of the unshifted character, so chr(event.key) recovers
                # it directly; the shifted variant sits at the same position
                # in a parallel string.
                unshifted = '`1234567890-=[]\\;\',./' + string.ascii_lowercase
                shifted = '~!@#$%^&*()_+{}|:"<>?' + string.ascii_uppercase
                if 0 <= event.key < 256 and chr(event.key) in unshifted:
                    char = chr(event.key)
                    if self.shifted:
                        char = shifted[unshifted.index(char)]
                    # Honour the 'restricted' option exactly as before.
                    if char in self.restricted:
                        self.value += char
if len(self.value) > self.maxlength and self.maxlength >= 0: self.value = self.value[:-1]
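# Minimal usage sketch (not part of the original module). It assumes a
# desktop pygame environment; the window size, colours and 20-character
# limit are arbitrary choices for the demo.
if __name__ == '__main__':
    pygame.init()
    screen = pygame.display.set_mode((400, 100))
    textbox = Input(x=10, y=30, maxlength=20, prompt='> ')
    clock = pygame.time.Clock()
    running = True
    while running:
        events = pygame.event.get()
        for e in events:
            if e.type == QUIT:
                running = False
        textbox.update(events)
        screen.fill((255, 255, 255))
        textbox.draw(screen)
        pygame.display.flip()
        clock.tick(30)
    pygame.quit()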
|
kushalbhola/MyStuff
|
refs/heads/master
|
Practice/PythonApplication/env/Lib/site-packages/pandas/tests/scalar/interval/test_ops.py
|
2
|
"""Tests for Interval-Interval operations, such as overlaps, contains, etc."""
import pytest
from pandas import Interval, Timedelta, Timestamp
@pytest.fixture(
params=[
(Timedelta("0 days"), Timedelta("1 day")),
(Timestamp("2018-01-01"), Timedelta("1 day")),
(0, 1),
],
ids=lambda x: type(x[0]).__name__,
)
def start_shift(request):
"""
Fixture for generating intervals of types from a start value and a shift
value that can be added to start to generate an endpoint
"""
return request.param
class TestOverlaps:
def test_overlaps_self(self, start_shift, closed):
start, shift = start_shift
interval = Interval(start, start + shift, closed)
assert interval.overlaps(interval)
def test_overlaps_nested(self, start_shift, closed, other_closed):
start, shift = start_shift
interval1 = Interval(start, start + 3 * shift, other_closed)
interval2 = Interval(start + shift, start + 2 * shift, closed)
# nested intervals should always overlap
assert interval1.overlaps(interval2)
def test_overlaps_disjoint(self, start_shift, closed, other_closed):
start, shift = start_shift
interval1 = Interval(start, start + shift, other_closed)
interval2 = Interval(start + 2 * shift, start + 3 * shift, closed)
# disjoint intervals should never overlap
assert not interval1.overlaps(interval2)
def test_overlaps_endpoint(self, start_shift, closed, other_closed):
start, shift = start_shift
interval1 = Interval(start, start + shift, other_closed)
interval2 = Interval(start + shift, start + 2 * shift, closed)
# overlap if shared endpoint is closed for both (overlap at a point)
result = interval1.overlaps(interval2)
expected = interval1.closed_right and interval2.closed_left
assert result == expected
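    def test_overlaps_endpoint_point_touch(self):
        # Illustrative addition (not in the upstream suite): intervals that
        # touch at a single shared endpoint overlap only when both sides are
        # closed at that point.
        assert Interval(0, 1, closed="right").overlaps(Interval(1, 2, closed="left"))
        assert not Interval(0, 1, closed="right").overlaps(Interval(1, 2, closed="right"))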
@pytest.mark.parametrize(
"other",
[10, True, "foo", Timedelta("1 day"), Timestamp("2018-01-01")],
ids=lambda x: type(x).__name__,
)
def test_overlaps_invalid_type(self, other):
interval = Interval(0, 1)
msg = "`other` must be an Interval, got {other}".format(
other=type(other).__name__
)
with pytest.raises(TypeError, match=msg):
interval.overlaps(other)
|
ericholscher/django
|
refs/heads/master
|
tests/dispatch/tests/test_saferef.py
|
1
|
import unittest
from django.dispatch.saferef import safeRef
from django.utils.six.moves import xrange
class Test1(object):
def x(self):
pass
def test2(obj):
pass
class Test2(object):
def __call__(self, obj):
pass
class SaferefTests(unittest.TestCase):
def setUp(self):
ts = []
ss = []
for x in xrange(5000):
t = Test1()
ts.append(t)
s = safeRef(t.x, self._closure)
ss.append(s)
ts.append(test2)
ss.append(safeRef(test2, self._closure))
for x in xrange(30):
t = Test2()
ts.append(t)
s = safeRef(t, self._closure)
ss.append(s)
self.ts = ts
self.ss = ss
self.closureCount = 0
def tearDown(self):
del self.ts
del self.ss
def testIn(self):
"""Test the "in" operator for safe references (cmp)"""
for t in self.ts[:50]:
self.assertTrue(safeRef(t.x) in self.ss)
def testValid(self):
"""Test that the references are valid (return instance methods)"""
for s in self.ss:
self.assertTrue(s())
def testShortCircuit(self):
"""Test that creation short-circuits to reuse existing references"""
sd = {}
for s in self.ss:
sd[s] = 1
for t in self.ts:
if hasattr(t, 'x'):
self.assertTrue(safeRef(t.x) in sd)
else:
self.assertTrue(safeRef(t) in sd)
def testRepresentation(self):
"""Test that the reference object's representation works
XXX Doesn't currently check the results, just that no error
is raised
"""
repr(self.ss[-1])
def _closure(self, ref):
"""Dumb utility mechanism to increment deletion counter"""
self.closureCount += 1
|
tukutela/Kay-Framework
|
refs/heads/master
|
kay/auth/urls.py
|
10
|
# -*- coding: utf-8 -*-
"""
Kay authentication urls.
:Copyright: (c) 2009 Accense Technology, Inc.
Takashi Matsuo <tmatsuo@candit.jp>,
All rights reserved.
:license: BSD, see LICENSE for more details.
"""
from kay.routing import (
ViewGroup, Rule
)
view_groups = [
ViewGroup(
Rule('/login', endpoint='login', view='kay.auth.views.login'),
Rule('/login_box', endpoint='login_box', view='kay.auth.views.login_box'),
Rule('/post_session', endpoint='post_session',
view='kay.auth.views.post_session'),
Rule('/logout', endpoint='logout', view='kay.auth.views.logout'),
Rule('/change_password', endpoint='change_password',
view=('kay.auth.views.ChangePasswordHandler',(), {})),
Rule('/request_reset_password', endpoint='request_reset_password',
view='kay.auth.views.request_reset_password'),
Rule('/reset_password/<session_key>', endpoint='reset_password',
view='kay.auth.views.reset_password'),
)
]
|
jmargeta/scikit-learn
|
refs/heads/master
|
sklearn/decomposition/tests/test_sparse_pca.py
|
7
|
# Author: Vlad Niculae
# License: BSD
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
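# Illustrative shapes for the generator above: generate_toy_data(3, 10, (8, 8))
# returns Y of shape (10, 64), U of shape (10, 3) and V of shape (3, 64),
# since n_features = image_size[0] * image_size[1] = 64.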
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
spca = SparsePCA(n_components=3, n_jobs=2, random_state=0,
alpha=alpha).fit(Y)
U2 = spca.transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
def test_transform_nan():
"""
    Test that SparsePCA won't return NaN when a feature is 0 across all
    samples.
"""
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
|
ceglug/math-assistant
|
refs/heads/master
|
mathdemov2/mathdemo2/manageprofile.py
|
1
|
"""
System configuration is central to making this tool convenient for its users.
By handling multiple profiles, settings can be edited, managed and loaded for the system,
which keeps the tool flexible and easy to configure.
Saved configurations can also be reused on other systems through the same profile mechanism.
Manage profile performs profile creation and removal.
Edit profile changes the default or predefined settings and saves them to the configuration files.
Load profile reads the specified configuration file and assigns the saved settings to the current math assistant object.
All of these operations work on the configuration files in the current directory.
@license: Math Assistant is a free software;
You can redistribute it and/or modify it under
the terms of the GNU General Public License
as published by the Free Software Foundation;
@copyright: GNU General Public License version 3.0
@author: MURALI KUMAR. M
@contact: U{murali.au@gmail.com}
"""
import wx
import wx.lib.colourselect as csel
import os
import ConfigParser
import sys
class manageconf(wx.Panel):
"""
This class is used to perform profile creation and removal operations.
    When a profile is created, it is added with default settings.
"""
def __init__(self, parent,profiles,frame):
"""
Constructor method for this class.
@param parent : Parent class of the object
@param profiles : profiles list in current directory
@param frame : MainFrame class object
"""
wx.Panel.__init__(self, parent)
self.config = ConfigParser.RawConfigParser() #: ConfigParser class instance
self.config.add_section('dict')
self.config.add_section('options')
self.config.add_section('settings')
self.config.add_section('color')
self.profile = None #: current profile name
self.remindex = None #: index no for the profile to remove in the list
self.remlabel = None #: profile name to remove in the list
self.frame = frame #: MainFrame class object
#t = wx.StaticText(self, -1, "This is a PageTwo object", (40,40))
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
self.rmtext = wx.StaticText(self, -1 , " Select a profile to Remove ")
        self.crtext = wx.StaticText(self, -1 , " Type a profile name to create (without .ext) ")
self.tc = wx.TextCtrl(self, -1, size=(125, -1) )
self.lc = wx.ListBox(self,10,(-1, -1),(170, 130),choices = profiles,style=wx.LB_SINGLE)
self.add = wx.Button(self , 1 , ' ADD ')
self.remove = wx.Button(self , 2 , ' REMOVE ')
self.cancel = wx.Button(self, 3, ' CLOSE ')
self.line = wx.StaticLine(self)
self.tc.SetFocus()
vbox.Add(self.crtext ,0, wx.TOP|wx.ALIGN_CENTER , 30)
hbox1.Add(self.tc ,0,wx.RIGHT,10)
hbox1.Add(self.add ,0)
vbox.Add(hbox1,0,wx.ALL|wx.ALIGN_CENTRE,20)
vbox.Add(self.line,0,wx.TOP|wx.EXPAND,10)
vbox.Add(self.rmtext ,0, wx.TOP|wx.ALIGN_CENTER , 30)
vbox.Add(self.lc , 0 ,wx.ALL|wx.ALIGN_CENTER , 20)
hbox2.Add(self.remove ,0,wx.RIGHT,10)
hbox2.Add(self.cancel ,0)
vbox.Add(hbox2,0,wx.ALL|wx.ALIGN_CENTER , 30)
self.add.Enable(False)
self.lc.SetSelection(0)
self.Bind(wx.EVT_TEXT, self.Ontext,self.tc)
self.Bind(wx.EVT_BUTTON, self.Onadd, id=1)
self.Bind(wx.EVT_BUTTON, self.Onremove, id=2)
self.Bind(wx.EVT_BUTTON, self.Oncancel, id=3)
self.Bind(wx.EVT_LISTBOX, self.Onlistselect ,self.lc)
self.SetSizerAndFit(vbox)
self.Centre()
self.Show(True)
def Onadd(self, event):
"""
        Adds a new profile when the ADD button is clicked.
"""
self.profile = self.tc.GetValue() + '.cfg'
self.defaultprofilecreate()
try:
with open(self.profile, 'wb') as configfile:
self.config.write(configfile)
self.lc.Append(self.profile)
self.add.Enable(False)
self.tc.SetValue('')
        except IOError:
            print 'File creation error. Try again!'
def Onremove(self, event):
"""
        Removes an existing profile when the REMOVE button is clicked.
"""
        if self.remindex is None or not self.remlabel:
            return
        self.removeconfigfile()
        self.lc.Delete(self.remindex)
def Oncancel(self, event):
"""
This method closes the ManageProfile notebook dialog
"""
self.frame.Close()
def Ontext(self,event):
"""
        Event handler for the text control used in this dialog;
        enables the ADD button when a name has been entered and disables it otherwise.
"""
        self.add.Enable(len(event.GetString()) > 0)
def Onlistselect(self, event):
"""
        Event handler for the list control;
        stores the index and name of the profile selected for removal.
"""
self.remindex = event.GetSelection()
self.remlabel = event.GetString()
def removeconfigfile(self):
"""
        Removes the profile named by remlabel from the current directory.
"""
try:
fname = os.path.join(os.curdir,self.remlabel)
os.remove(fname)
#print 'remove file name',fname
        except OSError:
            print 'File remove error. Try again!'
def defaultprofilecreate(self):
"""
        Populates the config variable with default profile settings.
"""
        self.butList1 = ['9', '8', '7', '6', '5', '4',
                         '3', '2', '1', '0', '<---', 'NEXT']   #: Menugroup 1
        self.butList2 = [' + ', ' -- ', ' * ', ' / ', ' = ', 'NEXT',
                         'PREV', 'UP', 'NEXTT', 'LEFT', 'DOWN', 'RIGHT']   #: Menugroup 2
        self.butList3 = ['DIV', 'LCM', 'BY', 'CROSS', 'DOT', 'DEL',
                         ' ( ', ' ) ', ' , ', ' | ', ' % ', 'NEXT']   #: Menugroup 3
self.Sound = False #: sound option
self.Bclick = True #: Button click option
self.Lclick = True #: Left click option
self.Showgrid = False #: show gridlines option
self.Sbar = True #: show statusbar option
self.Mbspeed = 5 #: Menubox speed option
self.Mispeed = 2 #: Menuitem speed option
self.Zoomlevel = 2 #: Zoom level option
self.paneltype = 'VERTICAL' #: Paneltype option
self.panelnos = '3' #: Panel nos option
self.Bpcolor = (128, 255, 255, 255) #: Background panel color option
self.Hlcolor = (255, 0, 0, 255) #: Highlighting color option
self.config.set('dict' ,'butlist1',repr(self.butList1))
self.config.set('dict' ,'butlist2',repr(self.butList2))
self.config.set('dict' ,'butlist3',repr(self.butList3))
self.config.set('options' ,'sound',self.Sound)
self.config.set('options' ,'bclick',self.Bclick)
self.config.set('options' ,'lclick',self.Lclick)
self.config.set('options' ,'showgrid',self.Showgrid)
self.config.set('options' ,'sbar',self.Sbar)
self.config.set('settings' ,'mbspeed',self.Mbspeed)
self.config.set('settings' ,'mipeed',self.Mispeed)
self.config.set('settings' ,'zoomlevel',self.Zoomlevel)
self.config.set('settings' ,'Paneltype',self.paneltype)
self.config.set('settings' ,'Panelnos',self.panelnos)
self.config.set('color' ,'bpcolor',repr(self.Bpcolor))
self.config.set('color' ,'hlcolor',repr(self.Hlcolor))
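# Hypothetical sketch (not part of the original tool): reading back a profile
# written by manageconf.defaultprofilecreate above. Section and option names
# match the config.set calls; colours are stored via repr, hence the eval.
def _load_profile_sketch(path):
    cfg = ConfigParser.RawConfigParser()
    cfg.read(path)
    return {
        'sound': cfg.getboolean('options', 'sound'),
        'zoomlevel': cfg.getint('settings', 'zoomlevel'),
        'paneltype': cfg.get('settings', 'Paneltype'),
        'bpcolor': eval(cfg.get('color', 'bpcolor')),
    }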
class MainFrame(wx.Frame):
"""
    This class is used to show the notebook dialog for ManageProfile.
"""
def __init__(self):
"""
Constructor method for this class.
        Finds configuration files in the current directory and builds the manageconf page.
"""
wx.Frame.__init__(self, None, title="Profile Settings")
p = wx.Panel(self, -1) #: main panel instance
nb = wx.Notebook(p,-1, style=wx.BK_DEFAULT) #: notebook instance
self.profiles = [] #: profiles list (.cfg files) in current directory
ext = '.cfg'
for root, dirs, files in os.walk('./'):
for name in files:
filename = os.path.join(root, name)
if os.path.splitext(filename)[1] == ext:
self.profiles.append(name)
#print name
self.manage = manageconf(nb ,self.profiles,self)
nb.AddPage(self.manage, "Manage Profile")
sizer = wx.BoxSizer()
sizer.Add(nb, 1, wx.EXPAND)
p.SetSizer(sizer)
self.Center()
self.Show()
|
bdaroz/the-blue-alliance
|
refs/heads/master
|
tests/suggestions/test_suggestion_creator.py
|
4
|
import unittest2
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from consts.auth_type import AuthType
from consts.event_type import EventType
from consts.media_type import MediaType
from helpers.media_helper import MediaParser
from helpers.suggestions.suggestion_creator import SuggestionCreator
from models.account import Account
from models.event import Event
from models.match import Match
from models.media import Media
from models.suggestion import Suggestion
from models.team import Team
class TestTeamMediaSuggestionCreator(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
self.account = Account.get_or_insert(
"123",
email="user@example.com",
registered=True)
self.account.put()
self.account_banned = Account.get_or_insert(
"456",
email="user@example.com",
registered=True,
shadow_banned=True,
)
self.account_banned.put()
def tearDown(self):
self.testbed.deactivate()
def test_create_suggestion(self):
status, _ = SuggestionCreator.createTeamMediaSuggestion(
self.account.key,
"http://imgur.com/ruRAxDm",
"frc1124",
"2016")
self.assertEqual(status, 'success')
# Ensure the Suggestion gets created
suggestion_id = Suggestion.render_media_key_name('2016', 'team', 'frc1124', 'imgur', 'ruRAxDm')
suggestion = Suggestion.get_by_id(suggestion_id)
expected_dict = MediaParser.partial_media_dict_from_url("http://imgur.com/ruRAxDm")
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.review_state, Suggestion.REVIEW_PENDING)
self.assertEqual(suggestion.author, self.account.key)
self.assertEqual(suggestion.target_model, 'media')
self.assertDictContainsSubset(expected_dict, suggestion.contents)
def test_create_suggestion_banned(self):
status, _ = SuggestionCreator.createTeamMediaSuggestion(
self.account_banned.key,
"http://imgur.com/ruRAxDm",
"frc1124",
"2016")
self.assertEqual(status, 'success')
# Ensure the Suggestion gets created
suggestion_id = Suggestion.render_media_key_name('2016', 'team', 'frc1124', 'imgur', 'ruRAxDm')
suggestion = Suggestion.get_by_id(suggestion_id)
expected_dict = MediaParser.partial_media_dict_from_url("http://imgur.com/ruRAxDm")
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.review_state, Suggestion.REVIEW_AUTOREJECTED)
self.assertEqual(suggestion.author, self.account_banned.key)
self.assertEqual(suggestion.target_model, 'media')
self.assertDictContainsSubset(expected_dict, suggestion.contents)
def test_create_suggestion_with_url_params(self):
status, _ = SuggestionCreator.createTeamMediaSuggestion(
self.account.key,
"https://www.youtube.com/watch?v=VP992UKFbko",
"frc1124",
"2016")
self.assertEqual(status, 'success')
# Ensure the Suggestion gets created
suggestion_id = Suggestion.render_media_key_name('2016', 'team', 'frc1124', 'youtube', 'VP992UKFbko')
suggestion = Suggestion.get_by_id(suggestion_id)
expected_dict = MediaParser.partial_media_dict_from_url("https://www.youtube.com/watch?v=VP992UKFbko")
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.review_state, Suggestion.REVIEW_PENDING)
self.assertEqual(suggestion.author, self.account.key)
self.assertEqual(suggestion.target_model, 'media')
self.assertDictContainsSubset(expected_dict, suggestion.contents)
def test_clean_url(self):
status, _ = SuggestionCreator.createTeamMediaSuggestion(
self.account.key,
" http://imgur.com/ruRAxDm?foo=bar#meow ",
"frc1124",
"2016")
self.assertEqual(status, 'success')
# Ensure the Suggestion gets created
suggestion_id = Suggestion.render_media_key_name('2016', 'team', 'frc1124', 'imgur', 'ruRAxDm')
suggestion = Suggestion.get_by_id(suggestion_id)
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.review_state, Suggestion.REVIEW_PENDING)
self.assertEqual(suggestion.author, self.account.key)
self.assertEqual(suggestion.target_model, 'media')
def test_duplicate_suggestion(self):
suggestion_id = Suggestion.render_media_key_name('2016', 'team', 'frc1124', 'imgur', 'ruRAxDm')
Suggestion(
id=suggestion_id,
author=self.account.key,
review_state=Suggestion.REVIEW_PENDING,
target_key="2012cmp",
target_model="event").put()
status, _ = SuggestionCreator.createTeamMediaSuggestion(
self.account.key,
"http://imgur.com/ruRAxDm",
"frc1124",
"2016")
self.assertEqual(status, 'suggestion_exists')
def test_media_exists(self):
media_id = Media.render_key_name(MediaType.IMGUR, 'ruRAxDm')
Media.get_or_insert(
media_id,
media_type_enum=MediaType.IMGUR,
foreign_key='ruRAxDm',
references=[ndb.Key(Team, 'frc1124')]).put()
status, _ = SuggestionCreator.createTeamMediaSuggestion(
self.account.key,
"http://imgur.com/ruRAxDm",
"frc1124",
"2016")
self.assertEqual(status, 'media_exists')
def test_bad_url(self):
status, _ = SuggestionCreator.createTeamMediaSuggestion(
self.account.key,
"http://foo.com/blah",
"frc1124",
"2016")
self.assertEqual(status, 'bad_url')
class TestEventMediaSuggestionCreator(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
self.account = Account.get_or_insert(
"123",
email="user@example.com",
registered=True)
self.account.put()
self.account_banned = Account.get_or_insert(
"456",
email="user@example.com",
registered=True,
shadow_banned=True,
)
self.account_banned.put()
def tearDown(self):
self.testbed.deactivate()
def test_create_suggestion(self):
status, _ = SuggestionCreator.createEventMediaSuggestion(
self.account.key,
"https://www.youtube.com/watch?v=H-54KMwMKY0",
"2016nyny")
self.assertEqual(status, 'success')
# Ensure the Suggestion gets created
suggestion_id = Suggestion.render_media_key_name('2016', 'event', '2016nyny', 'youtube', 'H-54KMwMKY0')
suggestion = Suggestion.get_by_id(suggestion_id)
expected_dict = MediaParser.partial_media_dict_from_url("https://www.youtube.com/watch?v=H-54KMwMKY0")
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.review_state, Suggestion.REVIEW_PENDING)
self.assertEqual(suggestion.author, self.account.key)
self.assertEqual(suggestion.target_model, 'event_media')
self.assertDictContainsSubset(expected_dict, suggestion.contents)
def test_create_suggestion_banned(self):
status, _ = SuggestionCreator.createEventMediaSuggestion(
self.account_banned.key,
"https://www.youtube.com/watch?v=H-54KMwMKY0",
"2016nyny")
self.assertEqual(status, 'success')
# Ensure the Suggestion gets created
suggestion_id = Suggestion.render_media_key_name('2016', 'event', '2016nyny', 'youtube', 'H-54KMwMKY0')
suggestion = Suggestion.get_by_id(suggestion_id)
expected_dict = MediaParser.partial_media_dict_from_url("https://www.youtube.com/watch?v=H-54KMwMKY0")
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.review_state, Suggestion.REVIEW_AUTOREJECTED)
self.assertEqual(suggestion.author, self.account_banned.key)
self.assertEqual(suggestion.target_model, 'event_media')
self.assertDictContainsSubset(expected_dict, suggestion.contents)
def test_create_non_video_suggestion(self):
status, _ = SuggestionCreator.createEventMediaSuggestion(
self.account.key,
"http://imgur.com/ruRAxDm",
"2016nyny")
self.assertEqual(status, 'bad_url')
def test_duplicate_suggestion(self):
suggestion_id = Suggestion.render_media_key_name('2016', 'event', '2016nyny', 'youtube', 'H-54KMwMKY0')
Suggestion(
id=suggestion_id,
author=self.account.key,
review_state=Suggestion.REVIEW_PENDING,
target_key="2016nyny",
target_model="event_media").put()
status, _ = SuggestionCreator.createEventMediaSuggestion(
self.account.key,
"https://www.youtube.com/watch?v=H-54KMwMKY0",
"2016nyny")
self.assertEqual(status, 'suggestion_exists')
def test_media_exists(self):
media_id = Media.render_key_name(MediaType.YOUTUBE_VIDEO, 'H-54KMwMKY0')
Media.get_or_insert(
media_id,
media_type_enum=MediaType.YOUTUBE_VIDEO,
foreign_key='H-54KMwMKY0',
references=[ndb.Key(Event, '2016nyny')]).put()
status, _ = SuggestionCreator.createEventMediaSuggestion(
self.account.key,
"https://www.youtube.com/watch?v=H-54KMwMKY0",
"2016nyny")
self.assertEqual(status, 'media_exists')
def test_create_bad_url(self):
status, _ = SuggestionCreator.createEventMediaSuggestion(
self.account.key,
"http://foobar.com/ruRAxDm",
"2016nyny")
self.assertEqual(status, 'bad_url')
class TestOffseasonEventSuggestionCreator(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
self.account = Account.get_or_insert(
"123",
email="user@example.com",
registered=True)
self.account.put()
self.account_banned = Account.get_or_insert(
"456",
email="user@example.com",
registered=True,
shadow_banned=True,
)
self.account_banned.put()
def tearDown(self):
self.testbed.deactivate()
def test_create_suggestion(self):
status, _ = SuggestionCreator.createOffseasonEventSuggestion(
self.account.key,
"Test Event",
"2016-5-1",
"2016-5-2",
"http://foo.bar.com",
"The Venue",
"123 Fake Street",
"New York", "NY", "USA")
self.assertEqual(status, 'success')
# Ensure the Suggestion gets created
suggestions = Suggestion.query().fetch()
self.assertIsNotNone(suggestions)
self.assertEqual(len(suggestions), 1)
suggestion = suggestions[0]
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.contents['name'], "Test Event")
self.assertEqual(suggestion.contents['start_date'], '2016-5-1')
self.assertEqual(suggestion.contents['end_date'], '2016-5-2')
self.assertEqual(suggestion.contents['website'], 'http://foo.bar.com')
self.assertEqual(suggestion.contents['address'], '123 Fake Street')
self.assertEqual(suggestion.contents['city'], 'New York')
self.assertEqual(suggestion.contents['state'], 'NY')
self.assertEqual(suggestion.contents['country'], 'USA')
self.assertEqual(suggestion.contents['venue_name'], 'The Venue')
def test_create_suggestion_banned(self):
status, _ = SuggestionCreator.createOffseasonEventSuggestion(
self.account_banned.key,
"Test Event",
"2016-5-1",
"2016-5-2",
"http://foo.bar.com",
"The Venue",
"123 Fake Street",
"New York", "NY", "USA")
self.assertEqual(status, 'success')
# Ensure the Suggestion gets created
suggestions = Suggestion.query().fetch()
self.assertIsNotNone(suggestions)
self.assertEqual(len(suggestions), 1)
suggestion = suggestions[0]
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.contents['name'], "Test Event")
self.assertEqual(suggestion.contents['start_date'], '2016-5-1')
self.assertEqual(suggestion.contents['end_date'], '2016-5-2')
self.assertEqual(suggestion.contents['website'], 'http://foo.bar.com')
self.assertEqual(suggestion.contents['address'], '123 Fake Street')
self.assertEqual(suggestion.contents['city'], 'New York')
self.assertEqual(suggestion.contents['state'], 'NY')
self.assertEqual(suggestion.contents['country'], 'USA')
self.assertEqual(suggestion.contents['venue_name'], 'The Venue')
self.assertEqual(suggestion.review_state, Suggestion.REVIEW_AUTOREJECTED)
def test_missing_params(self):
status, failures = SuggestionCreator.createOffseasonEventSuggestion(
self.account.key,
"",
"2016-5-1",
"2016-5-2",
"http://foo.bar.com",
"The Venue",
"123 Fake Street", "New York", "NY", "USA")
self.assertEqual(status, 'validation_failure')
self.assertTrue('name' in failures)
status, failures = SuggestionCreator.createOffseasonEventSuggestion(
self.account.key,
"Test Event",
"",
"2016-5-2",
"http://foo.bar.com",
"The Venue",
"123 Fake Street", "New York", "NY", "USA")
self.assertEqual(status, 'validation_failure')
self.assertTrue('start_date' in failures)
status, failures = SuggestionCreator.createOffseasonEventSuggestion(
self.account.key,
"Test Event",
"2016-5-1",
"",
"http://foo.bar.com",
"The Venue",
"123 Fake Street", "New York", "NY", "USA")
self.assertEqual(status, 'validation_failure')
self.assertTrue('end_date' in failures)
status, failures = SuggestionCreator.createOffseasonEventSuggestion(
self.account.key,
"Test Event",
"2016-5-1",
"2016-5-2",
"",
"The Venue",
"123 Fake Street", "New York", "NY", "USA")
self.assertEqual(status, 'validation_failure')
self.assertTrue('website' in failures)
status, failures = SuggestionCreator.createOffseasonEventSuggestion(
self.account.key,
"Test Event",
"2016-5-1",
"2016-5-2",
"http://foo.bar.com",
"The Venue",
"", "New York", "NY", "USA")
self.assertEqual(status, 'validation_failure')
self.assertTrue('venue_address' in failures)
status, failures = SuggestionCreator.createOffseasonEventSuggestion(
self.account.key,
"Test Event",
"2016-5-1",
"2016-5-2",
"http://foo.bar.com",
"",
"123 Fake Street", "", "", "")
self.assertEqual(status, 'validation_failure')
self.assertTrue('venue_name' in failures)
self.assertTrue('venue_city' in failures)
self.assertTrue('venue_state' in failures)
self.assertTrue('venue_country' in failures)
def test_out_of_order_dates(self):
status, failures = SuggestionCreator.createOffseasonEventSuggestion(
self.account.key,
"Test Event",
"2016-5-4",
"2016-5-2",
"http://foo.bar.com",
"The Venue",
"123 Fake Street", "New York", "NY", "USA")
self.assertEqual(status, 'validation_failure')
self.assertTrue('end_date' in failures)
def test_malformed_dates(self):
status, failures = SuggestionCreator.createOffseasonEventSuggestion(
self.account.key,
"Test Event",
"meow",
"2016-5-2",
"http://foo.bar.com",
"The Venue",
"123 Fake Street", "New York", "NY", "USA")
self.assertEqual(status, 'validation_failure')
self.assertTrue('start_date' in failures)
status, failures = SuggestionCreator.createOffseasonEventSuggestion(
self.account.key,
"Test Event",
"2016-5-1",
"moo",
"http://foo.bar.com",
"The Venue",
"123 Fake Street", "New York", "NY", "USA")
self.assertEqual(status, 'validation_failure')
self.assertTrue('end_date' in failures)
class TestApiWriteSuggestionCreator(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
self.account = Account.get_or_insert(
"123",
email="user@example.com",
registered=True)
self.account.put()
self.account_banned = Account.get_or_insert(
"456",
email="user@example.com",
registered=True,
shadow_banned=True,
)
self.account_banned.put()
def tearDown(self):
self.testbed.deactivate()
def test_create_suggestion(self):
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016, event_type_enum=EventType.OFFSEASON)
event.put()
status = SuggestionCreator.createApiWriteSuggestion(
self.account.key,
"2016test",
"Event Organizer",
[1, 2, 3])
self.assertEqual(status, 'success')
# Ensure the Suggestion gets created
suggestions = Suggestion.query().fetch()
self.assertIsNotNone(suggestions)
self.assertEqual(len(suggestions), 1)
suggestion = suggestions[0]
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.contents['event_key'], "2016test")
self.assertEqual(suggestion.contents['affiliation'], "Event Organizer")
self.assertListEqual(suggestion.contents['auth_types'], [1, 2, 3])
def test_create_suggestion_banned(self):
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016, event_type_enum=EventType.OFFSEASON)
event.put()
status = SuggestionCreator.createApiWriteSuggestion(
self.account_banned.key,
"2016test",
"Event Organizer",
[1, 2, 3])
self.assertEqual(status, 'success')
# Ensure the Suggestion gets created
suggestions = Suggestion.query().fetch()
self.assertIsNotNone(suggestions)
self.assertEqual(len(suggestions), 1)
suggestion = suggestions[0]
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.contents['event_key'], "2016test")
self.assertEqual(suggestion.contents['affiliation'], "Event Organizer")
self.assertListEqual(suggestion.contents['auth_types'], [1, 2, 3])
self.assertEqual(suggestion.review_state, Suggestion.REVIEW_AUTOREJECTED)
def test_official_event(self):
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016, event_type_enum=EventType.REGIONAL)
event.put()
status = SuggestionCreator.createApiWriteSuggestion(
self.account.key,
"2016test",
"Event Organizer",
[AuthType.MATCH_VIDEO, AuthType.EVENT_MATCHES, AuthType.EVENT_ALLIANCES])
self.assertEqual(status, 'success')
# Ensure the Suggestion gets created with only MATCH_VIDEO permission
suggestions = Suggestion.query().fetch()
self.assertIsNotNone(suggestions)
self.assertEqual(len(suggestions), 1)
suggestion = suggestions[0]
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.contents['event_key'], "2016test")
self.assertEqual(suggestion.contents['affiliation'], "Event Organizer")
self.assertListEqual(suggestion.contents['auth_types'], [AuthType.MATCH_VIDEO])
def test_no_event(self):
status = SuggestionCreator.createApiWriteSuggestion(
self.account.key,
"2016test",
"Event Organizer",
[1, 2, 3])
self.assertEqual(status, 'bad_event')
def test_no_role(self):
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016, event_type_enum=EventType.OFFSEASON)
event.put()
status = SuggestionCreator.createApiWriteSuggestion(
self.account.key,
"2016test",
"",
[1, 2, 3])
self.assertEqual(status, 'no_affiliation')
def test_undefined_auth_type(self):
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016, event_type_enum=EventType.OFFSEASON)
event.put()
status = SuggestionCreator.createApiWriteSuggestion(
self.account.key,
"2016test",
"Event Organizer",
[1, 2, -1, -2]) # -1 and -2 should be filtered out
self.assertEqual(status, 'success')
# Ensure the Suggestion gets created
suggestions = Suggestion.query().fetch()
self.assertIsNotNone(suggestions)
self.assertEqual(len(suggestions), 1)
suggestion = suggestions[0]
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.contents['event_key'], "2016test")
self.assertEqual(suggestion.contents['affiliation'], "Event Organizer")
self.assertListEqual(suggestion.contents['auth_types'], [1, 2])
class TestSuggestEventWebcastCreator(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
self.account = Account.get_or_insert(
"123",
email="user@example.com",
registered=True)
self.account.put()
self.account_banned = Account.get_or_insert(
"456",
email="user@example.com",
registered=True,
shadow_banned=True,
)
self.account_banned.put()
def tearDown(self):
self.testbed.deactivate()
def test_bad_event(self):
status = SuggestionCreator.createEventWebcastSuggestion(
self.account.key,
"http://twitch.tv/frcgamesense",
"",
"2016test")
self.assertEqual(status, 'bad_event')
def test_create_suggestion(self):
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016, event_type_enum=EventType.OFFSEASON)
event.put()
status = SuggestionCreator.createEventWebcastSuggestion(
self.account.key,
"http://twitch.tv/frcgamesense",
"",
"2016test")
self.assertEqual(status, 'success')
# Ensure the Suggestion gets created
expected_key = "webcast_2016test_twitch_frcgamesense_None"
suggestion = Suggestion.get_by_id(expected_key)
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.target_key, "2016test")
self.assertEqual(suggestion.author, self.account.key)
self.assertEqual(suggestion.review_state, Suggestion.REVIEW_PENDING)
self.assertIsNotNone(suggestion.contents)
self.assertEqual(suggestion.contents.get('webcast_url'), "http://twitch.tv/frcgamesense")
self.assertIsNotNone(suggestion.contents.get('webcast_dict'))
def test_create_suggestion_banned(self):
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016, event_type_enum=EventType.OFFSEASON)
event.put()
status = SuggestionCreator.createEventWebcastSuggestion(
self.account_banned.key,
"http://twitch.tv/frcgamesense",
"",
"2016test")
self.assertEqual(status, 'success')
# Ensure the Suggestion gets created
expected_key = "webcast_2016test_twitch_frcgamesense_None"
suggestion = Suggestion.get_by_id(expected_key)
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.target_key, "2016test")
self.assertEqual(suggestion.author, self.account_banned.key)
self.assertEqual(suggestion.review_state, Suggestion.REVIEW_AUTOREJECTED)
self.assertIsNotNone(suggestion.contents)
self.assertEqual(suggestion.contents.get('webcast_url'), "http://twitch.tv/frcgamesense")
self.assertIsNotNone(suggestion.contents.get('webcast_dict'))
def test_cleanup_url_without_scheme(self):
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016, event_type_enum=EventType.OFFSEASON)
event.put()
status = SuggestionCreator.createEventWebcastSuggestion(
self.account.key,
"twitch.tv/frcgamesense",
"",
"2016test")
self.assertEqual(status, 'success')
expected_key = "webcast_2016test_twitch_frcgamesense_None"
suggestion = Suggestion.get_by_id(expected_key)
self.assertIsNotNone(suggestion)
self.assertIsNotNone(suggestion.contents)
self.assertIsNotNone(suggestion.contents.get('webcast_dict'))
self.assertEqual(suggestion.contents.get('webcast_url'), "http://twitch.tv/frcgamesense")
def test_unknown_url_scheme(self):
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016, event_type_enum=EventType.OFFSEASON)
event.put()
status = SuggestionCreator.createEventWebcastSuggestion(
self.account.key,
"http://myweb.site/somewebcast",
"",
"2016test")
self.assertEqual(status, 'success')
suggestions = Suggestion.query().fetch()
self.assertIsNotNone(suggestions)
self.assertEqual(len(suggestions), 1)
suggestion = suggestions[0]
self.assertIsNotNone(suggestion)
self.assertIsNotNone(suggestion.contents)
self.assertIsNone(suggestion.contents.get('webcast_dict'))
self.assertEqual(suggestion.contents.get('webcast_url'), "http://myweb.site/somewebcast")
def test_webcast_already_exists(self):
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016,
event_type_enum=EventType.OFFSEASON,
webcast_json="[{\"type\": \"twitch\", \"channel\": \"frcgamesense\"}]")
event.put()
status = SuggestionCreator.createEventWebcastSuggestion(
self.account.key,
"http://twitch.tv/frcgamesense",
"",
"2016test")
self.assertEqual(status, 'webcast_exists')
def test_duplicate_suggestion(self):
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016, event_type_enum=EventType.OFFSEASON)
event.put()
status = SuggestionCreator.createEventWebcastSuggestion(
self.account.key,
"http://twitch.tv/frcgamesense",
"",
"2016test")
self.assertEqual(status, 'success')
status = SuggestionCreator.createEventWebcastSuggestion(
self.account.key,
"http://twitch.tv/frcgamesense",
"",
"2016test")
self.assertEqual(status, 'suggestion_exists')
def test_duplicate_unknown_suggestion_type(self):
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016, event_type_enum=EventType.OFFSEASON)
event.put()
status = SuggestionCreator.createEventWebcastSuggestion(
self.account.key,
"http://myweb.site/somewebcast",
"",
"2016test")
self.assertEqual(status, 'success')
status = SuggestionCreator.createEventWebcastSuggestion(
self.account.key,
"http://myweb.site/somewebcast",
"",
"2016test")
self.assertEqual(status, 'suggestion_exists')
def test_webcast_bad_date(self):
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016,
event_type_enum=EventType.OFFSEASON,
webcast_json="[{\"type\": \"twitch\", \"channel\": \"frcgamesense\"}]")
event.put()
status = SuggestionCreator.createEventWebcastSuggestion(
self.account.key,
"http://twitch.tv/frcgamesense",
"BAD DATE",
"2016test")
self.assertEqual(status, 'invalid_date')
def test_webcast_good_date(self):
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016, event_type_enum=EventType.OFFSEASON)
event.put()
status = SuggestionCreator.createEventWebcastSuggestion(
self.account.key,
"http://twitch.tv/frcgamesense",
"2017-02-28",
"2016test")
self.assertEqual(status, 'success')
suggestions = Suggestion.query().fetch()
self.assertIsNotNone(suggestions)
self.assertEqual(len(suggestions), 1)
suggestion = suggestions[0]
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.target_key, "2016test")
self.assertEqual(suggestion.author, self.account.key)
self.assertEqual(suggestion.review_state, Suggestion.REVIEW_PENDING)
self.assertIsNotNone(suggestion.contents)
self.assertEqual(suggestion.contents.get('webcast_url'), "http://twitch.tv/frcgamesense")
self.assertIsNotNone(suggestion.contents.get('webcast_dict'))
self.assertEqual(suggestion.contents.get('webcast_date'), "2017-02-28")
class TestSuggestMatchVideoYouTube(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
self.account = Account.get_or_insert(
"123",
email="user@example.com",
registered=True)
self.account.put()
self.account_banned = Account.get_or_insert(
"456",
email="user@example.com",
registered=True,
shadow_banned=True,
)
self.account_banned.put()
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016, event_type_enum=EventType.OFFSEASON)
event.put()
self.match = Match(id="2016test_f1m1", event=ndb.Key(Event, "2016test"), year=2016, comp_level="f", set_number=1, match_number=1, alliances_json='')
self.match.put()
def tearDown(self):
self.testbed.deactivate()
def test_bad_match(self):
status = SuggestionCreator.createMatchVideoYouTubeSuggestion(self.account.key, "37F5tbrFqJQ", "2016necmp_f1m2")
self.assertEqual(status, 'bad_match')
def test_create_suggestion(self):
status = SuggestionCreator.createMatchVideoYouTubeSuggestion(self.account.key, "37F5tbrFqJQ", "2016test_f1m1")
self.assertEqual(status, 'success')
suggestion_id = "media_2016_match_2016test_f1m1_youtube_37F5tbrFqJQ"
suggestion = Suggestion.get_by_id(suggestion_id)
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.author, self.account.key)
self.assertEqual(suggestion.target_key, '2016test_f1m1')
self.assertEqual(suggestion.target_model, 'match')
self.assertIsNotNone(suggestion.contents)
self.assertIsNotNone(suggestion.contents.get('youtube_videos'))
self.assertEqual(len(suggestion.contents.get('youtube_videos')), 1)
self.assertEqual(suggestion.contents.get('youtube_videos')[0], "37F5tbrFqJQ")
def test_create_suggestion_banned(self):
status = SuggestionCreator.createMatchVideoYouTubeSuggestion(self.account_banned.key, "37F5tbrFqJQ", "2016test_f1m1")
self.assertEqual(status, 'success')
suggestion_id = "media_2016_match_2016test_f1m1_youtube_37F5tbrFqJQ"
suggestion = Suggestion.get_by_id(suggestion_id)
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.author, self.account_banned.key)
self.assertEqual(suggestion.target_key, '2016test_f1m1')
self.assertEqual(suggestion.target_model, 'match')
self.assertIsNotNone(suggestion.contents)
self.assertIsNotNone(suggestion.contents.get('youtube_videos'))
self.assertEqual(len(suggestion.contents.get('youtube_videos')), 1)
self.assertEqual(suggestion.contents.get('youtube_videos')[0], "37F5tbrFqJQ")
self.assertEqual(suggestion.review_state, Suggestion.REVIEW_AUTOREJECTED)
def test_existing_video(self):
self.match.youtube_videos = ["37F5tbrFqJQ"]
self.match.put()
status = SuggestionCreator.createMatchVideoYouTubeSuggestion(self.account.key, "37F5tbrFqJQ", "2016test_f1m1")
self.assertEqual(status, 'video_exists')
def test_existing_suggestion(self):
status = SuggestionCreator.createMatchVideoYouTubeSuggestion(self.account.key, "37F5tbrFqJQ", "2016test_f1m1")
self.assertEqual(status, 'success')
status = SuggestionCreator.createMatchVideoYouTubeSuggestion(self.account.key, "37F5tbrFqJQ", "2016test_f1m1")
self.assertEqual(status, 'suggestion_exists')
def test_bad_youtube_key(self):
status = SuggestionCreator.createMatchVideoYouTubeSuggestion(self.account.key, "", "2016test_f1m1")
self.assertEqual(status, 'bad_url')
|
tylerjereddy/scipy
|
refs/heads/master
|
scipy/io/harwell_boeing/__init__.py
|
21
|
from scipy.io.harwell_boeing.hb import MalformedHeader, HBInfo, HBFile, \
HBMatrixType, hb_read, hb_write
|
bombilee/pywinauto
|
refs/heads/master
|
sandbox/_find_control_distance.py
|
17
|
# GUI Application automation and testing library
# Copyright (C) 2006 Mark Mc Mahon
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
from application import FindWindows
win = FindWindows(title = "Replace", class_name = "#32770")[0]
from findbestmatch import find_best_match
# get those visible controls that have visible window text
visibleTextChildren = [w for w in win.Children if w.IsVisible and w.Text]
# get those visible controls that do not have visible window text
visibleNonTextChildren = [w for w in win.Children if w.IsVisible and not w.Text]
distance_cutoff = 999
def FindClosestControl(ctrl, text_ctrls):
name = ctrl.FriendlyClassName()
closest = distance_cutoff
# now for each of the visible text controls
for text_ctrl in text_ctrls:
# get aliases to the control rectangles
text_r = text_ctrl.Rectangle()
ctrl_r = ctrl.Rectangle()
# skip controls where the text control is to the right of ctrl
if text_r.left >= ctrl_r.right:
continue
# skip controls where the text control is below ctrl
if text_r.top >= ctrl_r.bottom:
continue
# calculate the distance between the controls: (x^2 + y^2)^.5
distance = ((text_r.left - ctrl_r.left) ** 2 + (text_r.top - ctrl_r.top) ** 2) ** .5
# if this distance is closer than the last one, remember it
if distance < closest:
closest = distance
name = text_ctrl.Text.replace(' ', '').replace('&', '') + ctrl.FriendlyClassName()
return name
# for each of the items that do not have visible text
for w2 in visibleNonTextChildren:
closest = distance_cutoff
newname = ''
# now for each of the visible text controls
for text_child in visibleTextChildren:
wr = text_child.Rectangle()
w2r = w2.Rectangle()
# skip controls where the text control is to the right of w2
if wr.left >= w2r.right:
continue
# skip controls where the text control is below w2
if wr.top >= w2r.bottom:
continue
# calculate the distance to the control
distance = ((wr.left - w2r.left) ** 2.0 + (wr.top - w2r.top) ** 2.0) ** .5
# if this distance is closer than the last one, remember it
if distance < closest:
closest = distance
newname = text_child.Text.replace(' ', '').replace('&', '') + w2.FriendlyClassName()
if closest != distance_cutoff:
print newname
|
vicky2135/lucious
|
refs/heads/master
|
oscar/lib/python2.7/site-packages/wcwidth/table_wide.py
|
28
|
"""Wide_Eastasian table. Created by setup.py."""
# Generated: 2016-07-02T04:20:28.048222
# Source: EastAsianWidth-9.0.0.txt
# Date: 2016-05-27, 17:00:00 GMT [KW, LI]
WIDE_EASTASIAN = (
(0x1100, 0x115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
(0x231a, 0x231b,), # Watch ..Hourglass
(0x2329, 0x232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
(0x23e9, 0x23ec,), # Black Right-pointing Dou..Black Down-pointing Doub
(0x23f0, 0x23f0,), # Alarm Clock ..Alarm Clock
(0x23f3, 0x23f3,), # Hourglass With Flowing S..Hourglass With Flowing S
(0x25fd, 0x25fe,), # White Medium Small Squar..Black Medium Small Squar
(0x2614, 0x2615,), # Umbrella With Rain Drops..Hot Beverage
(0x2648, 0x2653,), # Aries ..Pisces
(0x267f, 0x267f,), # Wheelchair Symbol ..Wheelchair Symbol
(0x2693, 0x2693,), # Anchor ..Anchor
(0x26a1, 0x26a1,), # High Voltage Sign ..High Voltage Sign
(0x26aa, 0x26ab,), # Medium White Circle ..Medium Black Circle
(0x26bd, 0x26be,), # Soccer Ball ..Baseball
(0x26c4, 0x26c5,), # Snowman Without Snow ..Sun Behind Cloud
(0x26ce, 0x26ce,), # Ophiuchus ..Ophiuchus
(0x26d4, 0x26d4,), # No Entry ..No Entry
(0x26ea, 0x26ea,), # Church ..Church
(0x26f2, 0x26f3,), # Fountain ..Flag In Hole
(0x26f5, 0x26f5,), # Sailboat ..Sailboat
(0x26fa, 0x26fa,), # Tent ..Tent
(0x26fd, 0x26fd,), # Fuel Pump ..Fuel Pump
(0x2705, 0x2705,), # White Heavy Check Mark ..White Heavy Check Mark
(0x270a, 0x270b,), # Raised Fist ..Raised Hand
(0x2728, 0x2728,), # Sparkles ..Sparkles
(0x274c, 0x274c,), # Cross Mark ..Cross Mark
(0x274e, 0x274e,), # Negative Squared Cross M..Negative Squared Cross M
(0x2753, 0x2755,), # Black Question Mark Orna..White Exclamation Mark O
(0x2757, 0x2757,), # Heavy Exclamation Mark S..Heavy Exclamation Mark S
(0x2795, 0x2797,), # Heavy Plus Sign ..Heavy Division Sign
(0x27b0, 0x27b0,), # Curly Loop ..Curly Loop
(0x27bf, 0x27bf,), # Double Curly Loop ..Double Curly Loop
(0x2b1b, 0x2b1c,), # Black Large Square ..White Large Square
(0x2b50, 0x2b50,), # White Medium Star ..White Medium Star
(0x2b55, 0x2b55,), # Heavy Large Circle ..Heavy Large Circle
(0x2e80, 0x2e99,), # Cjk Radical Repeat ..Cjk Radical Rap
(0x2e9b, 0x2ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
(0x2f00, 0x2fd5,), # Kangxi Radical One ..Kangxi Radical Flute
(0x2ff0, 0x2ffb,), # Ideographic Description ..Ideographic Description
(0x3000, 0x303e,), # Ideographic Space ..Ideographic Variation In
(0x3041, 0x3096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
(0x3099, 0x30ff,), # Combining Katakana-hirag..Katakana Digraph Koto
(0x3105, 0x312d,), # Bopomofo Letter B ..Bopomofo Letter Ih
(0x3131, 0x318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
(0x3190, 0x31ba,), # Ideographic Annotation L..Bopomofo Letter Zy
(0x31c0, 0x31e3,), # Cjk Stroke T ..Cjk Stroke Q
(0x31f0, 0x321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
(0x3220, 0x3247,), # Parenthesized Ideograph ..Circled Ideograph Koto
(0x3250, 0x32fe,), # Partnership Sign ..Circled Katakana Wo
(0x3300, 0x4dbf,), # Square Apaato ..
(0x4e00, 0xa48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
(0xa490, 0xa4c6,), # Yi Radical Qot ..Yi Radical Ke
(0xa960, 0xa97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
(0xac00, 0xd7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
(0xf900, 0xfaff,), # Cjk Compatibility Ideogr..
(0xfe10, 0xfe19,), # Presentation Form For Ve..Presentation Form For Ve
(0xfe30, 0xfe52,), # Presentation Form For Ve..Small Full Stop
(0xfe54, 0xfe66,), # Small Semicolon ..Small Equals Sign
(0xfe68, 0xfe6b,), # Small Reverse Solidus ..Small Commercial At
(0xff01, 0xff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
(0xffe0, 0xffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
(0x16fe0, 0x16fe0,), # (nil) ..
(0x17000, 0x187ec,), # (nil) ..
(0x18800, 0x18af2,), # (nil) ..
(0x1b000, 0x1b001,), # Katakana Letter Archaic ..Hiragana Letter Archaic
(0x1f004, 0x1f004,), # Mahjong Tile Red Dragon ..Mahjong Tile Red Dragon
(0x1f0cf, 0x1f0cf,), # Playing Card Black Joker..Playing Card Black Joker
(0x1f18e, 0x1f18e,), # Negative Squared Ab ..Negative Squared Ab
(0x1f191, 0x1f19a,), # Squared Cl ..Squared Vs
(0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
(0x1f210, 0x1f23b,), # Squared Cjk Unified Ideo..
(0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
(0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
(0x1f300, 0x1f320,), # Cyclone ..Shooting Star
(0x1f32d, 0x1f335,), # Hot Dog ..Cactus
(0x1f337, 0x1f37c,), # Tulip ..Baby Bottle
(0x1f37e, 0x1f393,), # Bottle With Popping Cork..Graduation Cap
(0x1f3a0, 0x1f3ca,), # Carousel Horse ..Swimmer
(0x1f3cf, 0x1f3d3,), # Cricket Bat And Ball ..Table Tennis Paddle And
(0x1f3e0, 0x1f3f0,), # House Building ..European Castle
(0x1f3f4, 0x1f3f4,), # Waving Black Flag ..Waving Black Flag
(0x1f3f8, 0x1f43e,), # Badminton Racquet And Sh..Paw Prints
(0x1f440, 0x1f440,), # Eyes ..Eyes
(0x1f442, 0x1f4fc,), # Ear ..Videocassette
(0x1f4ff, 0x1f53d,), # Prayer Beads ..Down-pointing Small Red
(0x1f54b, 0x1f54e,), # Kaaba ..Menorah With Nine Branch
(0x1f550, 0x1f567,), # Clock Face One Oclock ..Clock Face Twelve-thirty
(0x1f57a, 0x1f57a,), # (nil) ..
(0x1f595, 0x1f596,), # Reversed Hand With Middl..Raised Hand With Part Be
(0x1f5a4, 0x1f5a4,), # (nil) ..
(0x1f5fb, 0x1f64f,), # Mount Fuji ..Person With Folded Hands
(0x1f680, 0x1f6c5,), # Rocket ..Left Luggage
(0x1f6cc, 0x1f6cc,), # Sleeping Accommodation ..Sleeping Accommodation
(0x1f6d0, 0x1f6d2,), # Place Of Worship ..
(0x1f6eb, 0x1f6ec,), # Airplane Departure ..Airplane Arriving
(0x1f6f4, 0x1f6f6,), # (nil) ..
(0x1f910, 0x1f91e,), # Zipper-mouth Face ..
(0x1f920, 0x1f927,), # (nil) ..
(0x1f930, 0x1f930,), # (nil) ..
(0x1f933, 0x1f93e,), # (nil) ..
(0x1f940, 0x1f94b,), # (nil) ..
(0x1f950, 0x1f95e,), # (nil) ..
(0x1f980, 0x1f991,), # Crab ..
(0x1f9c0, 0x1f9c0,), # Cheese Wedge ..Cheese Wedge
(0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..
(0x30000, 0x3fffd,), # (nil) ..
)
|
wehkamp/ansible
|
refs/heads/devel
|
test/units/template/__init__.py
|
7690
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
|
gramps-project/gramps
|
refs/heads/master
|
gramps/gui/editors/displaytabs/surnamemodel.py
|
11
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2010 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# GTK libraries
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# Gramps classes
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# SurnameModel
#
#-------------------------------------------------------------------------
class SurnameModel(Gtk.ListStore):
def __init__(self, surn_list, db):
#setup model for the treeview
Gtk.ListStore.__init__(self, str, str, str, str,
bool, object)
for surn in surn_list:
# fill the liststore
self.append(row=[surn.get_prefix(), surn.get_surname(),
surn.get_connector(), str(surn.get_origintype()),
surn.get_primary(), surn])
self.db = db
|
titilambert/teeawards
|
refs/heads/master
|
old/beaker/crypto/jcecrypto.py
|
14
|
"""
Encryption module that uses the Java Cryptography Extensions (JCE).
Note that in default installations of the Java Runtime Environment, the
maximum key length is limited to 128 bits due to US export
restrictions. This makes the generated keys incompatible with the ones
generated by pycryptopp, which has no such restrictions. To fix this,
download the "Unlimited Strength Jurisdiction Policy Files" from Sun,
which will allow encryption using 256 bit AES keys.
"""
from javax.crypto import Cipher
from javax.crypto.spec import SecretKeySpec, IvParameterSpec
import jarray
# Initialization vector filled with zeros
_iv = IvParameterSpec(jarray.zeros(16, 'b'))
def aesEncrypt(data, key):
cipher = Cipher.getInstance('AES/CTR/NoPadding')
skeySpec = SecretKeySpec(key, 'AES')
cipher.init(Cipher.ENCRYPT_MODE, skeySpec, _iv)
return cipher.doFinal(data).tostring()
# In CTR mode, decryption is the same operation as encryption, so the
# encryption function can simply be reused.
aesDecrypt = aesEncrypt
def getKeyLength():
maxlen = Cipher.getMaxAllowedKeyLength('AES/CTR/NoPadding')
return min(maxlen, 256) / 8
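# A minimal usage sketch (hypothetical; assumes a Jython runtime with the
# JCE available, and an illustrative 16-byte key):
#
#     key = '0123456789abcdef'               # 128-bit AES key
#     ciphertext = aesEncrypt('secret data', key)
#     assert aesDecrypt(ciphertext, key) == 'secret data'
#     print getKeyLength()                   # 16 or 32, depending on JCE policy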
|
QuantSoftware/QuantSoftwareToolkit
|
refs/heads/master
|
Legacy/Legacy/qstkmodels/OrderModel.py
|
5
|
import tables as pt #@UnresolvedImport
import time
class FillModel(pt.IsDescription):
timestamp = pt.Time64Col()
quantity = pt.Int32Col()
cashChange = pt.Float32Col()
commission = pt.Float32Col()
impactCost = pt.Float32Col()
class OrderModel(pt.IsDescription):
task = pt.StringCol(5)
shares = pt.Int32Col()
symbol = pt.StringCol(30)
order_type = pt.StringCol(5) #moo moc limit vwap
duration = pt.Time64Col()
timestamp = pt.Time64Col()
close_type = pt.StringCol(4) #lifo or fifo for a sell, none for a buy
limit_price = pt.Float32Col()
fill = FillModel()
|
tboyce1/home-assistant
|
refs/heads/dev
|
homeassistant/components/binary_sensor/maxcube.py
|
4
|
"""
Support for MAX! Window Shutter via MAX! Cube.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/maxcube/
"""
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.components.maxcube import MAXCUBE_HANDLE
from homeassistant.const import STATE_UNKNOWN
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Iterate through all MAX! Devices and add window shutters."""
cube = hass.data[MAXCUBE_HANDLE].cube
devices = []
for device in cube.devices:
name = "{} {}".format(
cube.room_by_id(device.room_id).name, device.name)
# Only add Window Shutters
if cube.is_windowshutter(device):
devices.append(MaxCubeShutter(hass, name, device.rf_address))
if devices:
add_devices(devices)
class MaxCubeShutter(BinarySensorDevice):
"""Representation of a MAX! Cube Binary Sensor device."""
def __init__(self, hass, name, rf_address):
"""Initialize MAX! Cube BinarySensorDevice."""
self._name = name
self._sensor_type = 'window'
self._rf_address = rf_address
self._cubehandle = hass.data[MAXCUBE_HANDLE]
self._state = STATE_UNKNOWN
@property
def should_poll(self):
"""Return the polling state."""
return True
@property
def name(self):
"""Return the name of the BinarySensorDevice."""
return self._name
@property
def device_class(self):
"""Return the class of this sensor."""
return self._sensor_type
@property
def is_on(self):
"""Return true if the binary sensor is on/open."""
return self._state
def update(self):
"""Get latest data from MAX! Cube."""
self._cubehandle.update()
device = self._cubehandle.cube.device_by_rf(self._rf_address)
self._state = device.is_open
|
dparlevliet/zelenka-report-storage
|
refs/heads/master
|
server-db/twisted/internet/test/test_epollreactor.py
|
39
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.epollreactor}.
"""
from __future__ import division, absolute_import
from twisted.trial.unittest import TestCase
try:
from twisted.internet.epollreactor import _ContinuousPolling
except ImportError:
_ContinuousPolling = None
from twisted.internet.task import Clock
from twisted.internet.error import ConnectionDone
class Descriptor(object):
"""
Records reads and writes, as if it were a C{FileDescriptor}.
"""
def __init__(self):
self.events = []
def fileno(self):
return 1
def doRead(self):
self.events.append("read")
def doWrite(self):
self.events.append("write")
def connectionLost(self, reason):
reason.trap(ConnectionDone)
self.events.append("lost")
class ContinuousPollingTests(TestCase):
"""
L{_ContinuousPolling} can be used to read and write from C{FileDescriptor}
objects.
"""
def test_addReader(self):
"""
Adding a reader when there was previously no reader starts up a
C{LoopingCall}.
"""
poller = _ContinuousPolling(Clock())
self.assertEqual(poller._loop, None)
reader = object()
self.assertFalse(poller.isReading(reader))
poller.addReader(reader)
self.assertNotEqual(poller._loop, None)
self.assertTrue(poller._loop.running)
self.assertIs(poller._loop.clock, poller._reactor)
self.assertTrue(poller.isReading(reader))
def test_addWriter(self):
"""
Adding a writer when there was previously no writer starts up a
C{LoopingCall}.
"""
poller = _ContinuousPolling(Clock())
self.assertEqual(poller._loop, None)
writer = object()
self.assertFalse(poller.isWriting(writer))
poller.addWriter(writer)
self.assertNotEqual(poller._loop, None)
self.assertTrue(poller._loop.running)
self.assertIs(poller._loop.clock, poller._reactor)
self.assertTrue(poller.isWriting(writer))
def test_removeReader(self):
"""
Removing a reader stops the C{LoopingCall}.
"""
poller = _ContinuousPolling(Clock())
reader = object()
poller.addReader(reader)
poller.removeReader(reader)
self.assertEqual(poller._loop, None)
self.assertEqual(poller._reactor.getDelayedCalls(), [])
self.assertFalse(poller.isReading(reader))
def test_removeWriter(self):
"""
Removing a writer stops the C{LoopingCall}.
"""
poller = _ContinuousPolling(Clock())
writer = object()
poller.addWriter(writer)
poller.removeWriter(writer)
self.assertEqual(poller._loop, None)
self.assertEqual(poller._reactor.getDelayedCalls(), [])
self.assertFalse(poller.isWriting(writer))
def test_removeUnknown(self):
"""
Removing unknown readers and writers silently does nothing.
"""
poller = _ContinuousPolling(Clock())
poller.removeWriter(object())
poller.removeReader(object())
def test_multipleReadersAndWriters(self):
"""
Adding multiple readers and writers results in a single
C{LoopingCall}.
"""
poller = _ContinuousPolling(Clock())
writer = object()
poller.addWriter(writer)
self.assertNotEqual(poller._loop, None)
poller.addWriter(object())
self.assertNotEqual(poller._loop, None)
poller.addReader(object())
self.assertNotEqual(poller._loop, None)
poller.addReader(object())
poller.removeWriter(writer)
self.assertNotEqual(poller._loop, None)
self.assertTrue(poller._loop.running)
self.assertEqual(len(poller._reactor.getDelayedCalls()), 1)
def test_readerPolling(self):
"""
Adding a reader causes its C{doRead} to be called on every poll
interval.
"""
reactor = Clock()
poller = _ContinuousPolling(reactor)
desc = Descriptor()
poller.addReader(desc)
self.assertEqual(desc.events, [])
reactor.advance(0.00001)
self.assertEqual(desc.events, ["read"])
reactor.advance(0.00001)
self.assertEqual(desc.events, ["read", "read"])
reactor.advance(0.00001)
self.assertEqual(desc.events, ["read", "read", "read"])
def test_writerPolling(self):
"""
Adding a writer causes its C{doWrite} to be called on every poll
interval.
"""
reactor = Clock()
poller = _ContinuousPolling(reactor)
desc = Descriptor()
poller.addWriter(desc)
self.assertEqual(desc.events, [])
reactor.advance(0.001)
self.assertEqual(desc.events, ["write"])
reactor.advance(0.001)
self.assertEqual(desc.events, ["write", "write"])
reactor.advance(0.001)
self.assertEqual(desc.events, ["write", "write", "write"])
def test_connectionLostOnRead(self):
"""
If a C{doRead} returns a value indicating disconnection,
C{connectionLost} is called on it.
"""
reactor = Clock()
poller = _ContinuousPolling(reactor)
desc = Descriptor()
desc.doRead = lambda: ConnectionDone()
poller.addReader(desc)
self.assertEqual(desc.events, [])
reactor.advance(0.001)
self.assertEqual(desc.events, ["lost"])
def test_connectionLostOnWrite(self):
"""
If a C{doWrite} returns a value indicating disconnection,
C{connectionLost} is called on it.
"""
reactor = Clock()
poller = _ContinuousPolling(reactor)
desc = Descriptor()
desc.doWrite = lambda: ConnectionDone()
poller.addWriter(desc)
self.assertEqual(desc.events, [])
reactor.advance(0.001)
self.assertEqual(desc.events, ["lost"])
def test_removeAll(self):
"""
L{_ContinuousPolling.removeAll} removes all descriptors and returns
the readers and writers.
"""
poller = _ContinuousPolling(Clock())
reader = object()
writer = object()
both = object()
poller.addReader(reader)
poller.addReader(both)
poller.addWriter(writer)
poller.addWriter(both)
removed = poller.removeAll()
self.assertEqual(poller.getReaders(), [])
self.assertEqual(poller.getWriters(), [])
self.assertEqual(len(removed), 3)
self.assertEqual(set(removed), set([reader, writer, both]))
def test_getReaders(self):
"""
L{_ContinuousPolling.getReaders} returns a list of the read
descriptors.
"""
poller = _ContinuousPolling(Clock())
reader = object()
poller.addReader(reader)
self.assertIn(reader, poller.getReaders())
def test_getWriters(self):
"""
L{_ContinuousPolling.getWriters} returns a list of the write
descriptors.
"""
poller = _ContinuousPolling(Clock())
writer = object()
poller.addWriter(writer)
self.assertIn(writer, poller.getWriters())
if _ContinuousPolling is None:
skip = "epoll not supported in this environment."
|
whip112/Whip112
|
refs/heads/master
|
vendor/packages/logilab/astng/test/data/all.py
|
42
|
name = 'a'
_bla = 2
other = 'o'
class Aaa: pass
def func(): print 'yo'
__all__ = 'Aaa', '_bla', 'name'
|
Lemon04/Symfony
|
refs/heads/master
|
vendor/doctrine/orm/docs/en/conf.py
|
2448
|
# -*- coding: utf-8 -*-
#
# Doctrine 2 ORM documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 3 18:10:24 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_exts'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['configurationblock']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Doctrine 2 ORM'
copyright = u'2010-12, Doctrine Project Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2'
# The full version, including alpha/beta/rc tags.
release = '2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'doctrine'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Doctrine2ORMdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Doctrine2ORM.tex', u'Doctrine 2 ORM Documentation',
u'Doctrine Project Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
primary_domain = "dcorm"
def linkcode_resolve(domain, info):
if domain == 'dcorm':
return 'http://'
return None
|
QuLogic/vispy
|
refs/heads/master
|
vispy/plot/__init__.py
|
20
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
This module provides functions for displaying data from a command-line
interface.
**NOTE**: This module is still experimental, and under development.
It currently lacks axes, but that is a high-priority target for
the next release.
Usage
-----
To use `vispy.plot` typically the main class `Fig` is first instantiated::
>>> from vispy.plot import Fig
>>> fig = Fig()
And then `PlotWidget` instances are automatically created by accessing
the ``fig`` instance::
>>> ax_left = fig[0, 0]
>>> ax_right = fig[0, 1]
Then plots are accomplished via methods of the `PlotWidget` instances::
>>> import numpy as np
>>> data = np.random.randn(2, 10)
>>> ax_left.plot(data)
>>> ax_right.histogram(data[1])
"""
from .fig import Fig # noqa
from .plotwidget import PlotWidget # noqa
from ..scene import * # noqa
|
openiitbombayx/edx-platform
|
refs/heads/master
|
cms/tests/test_startup.py
|
154
|
"""
Test cms startup
"""
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from mock import patch
from cms.startup import run, enable_theme
class StartupTestCase(TestCase):
"""
Test cms startup
"""
def setUp(self):
super(StartupTestCase, self).setUp()
@patch.dict("django.conf.settings.FEATURES", {"USE_CUSTOM_THEME": True})
@override_settings(THEME_NAME="bar")
def test_run_with_theme(self):
self.assertEqual(settings.FEATURES["USE_CUSTOM_THEME"], True)
with patch('cms.startup.enable_theme') as mock_enable_theme:
run()
self.assertTrue(mock_enable_theme.called)
@patch.dict("django.conf.settings.FEATURES", {"USE_CUSTOM_THEME": False})
def test_run_without_theme(self):
self.assertEqual(settings.FEATURES["USE_CUSTOM_THEME"], False)
with patch('cms.startup.enable_theme') as mock_enable_theme:
run()
self.assertFalse(mock_enable_theme.called)
@patch.dict("django.conf.settings.FEATURES", {"USE_CUSTOM_THEME": True})
@override_settings(THEME_NAME="bar")
@override_settings(FAVICON_PATH="images/favicon.ico")
def test_enable_theme(self):
enable_theme()
self.assertEqual(
settings.FAVICON_PATH,
'themes/bar/images/favicon.ico'
)
exp_path = (u'themes/bar', settings.ENV_ROOT / "themes/bar/static")
self.assertIn(exp_path, settings.STATICFILES_DIRS)
|
gispd/resources
|
refs/heads/master
|
PythonAddins/CopyMapPoint/Install/CopyMapPoint_addin.py
|
1
|
# The MIT License (MIT)
# Copyright (c) [2014] [GIS Professional Development, gispd.com]
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import functools
import os
import threading
import webbrowser
import arcpy
def add_to_clip_board(text):
"""Copy text to the clip board."""
print(text.strip())
command = 'echo {}| clip'.format(text.strip())
os.system(command)
def dd_to_dms(dd):
"""Convert decimal degrees to degrees, minutes, seconds."""
minutes, seconds = divmod(abs(dd)*3600, 60)
degrees, minutes = divmod(minutes, 60)
seconds = float('{0:.2f}'.format(seconds))
return int(degrees), int(minutes), seconds
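# Worked example (hypothetical input): dd_to_dms(-122.4194)
#   abs(-122.4194) * 3600 = 440709.84 arc-seconds
#   divmod(440709.84, 60) -> (7345.0, 9.84); divmod(7345.0, 60) -> (122.0, 25.0)
#   result: (122, 25, 9.84) -- the sign/hemisphere is handled by the caller.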
# A decorator that will run its wrapped function in a new thread
def run_in_other_thread(function):
# functool.wraps will copy over the docstring and some other metadata from the original function
@functools.wraps(function)
def fn_(*args, **kwargs):
thread = threading.Thread(target=function, args=args, kwargs=kwargs)
thread.start()
thread.join()
return fn_
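# Usage example (see below): wrapping blocking calls such as os.startfile or
# webbrowser.open so they execute on a worker thread, e.g.
#     open_browser = run_in_other_thread(webbrowser.open)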
class CopyMapPoint(object):
"""Implementation for CopyMapPoint_addin.copy_map_point (Tool)"""
def __init__(self):
self.enabled = True
self.cursor = 3
def onMouseDownMap(self, x, y, button, shift):
"""Copies map x,y to the clip board in degrees, minutes, seconds."""
# Get the spatial reference from the data frame.
mxd = arcpy.mapping.MapDocument('current')
map_sr = mxd.activeDataFrame.spatialReference
# Get the clicked point and reproject it.
map_point = arcpy.PointGeometry(arcpy.Point(x, y), map_sr)
wgs84_sr = arcpy.SpatialReference(4326)
transformation = arcpy.ListTransformations(map_sr, wgs84_sr)
if transformation:
wgs84_pt = map_point.projectAs(wgs84_sr, transformation[0])
else:
wgs84_pt = map_point.projectAs(wgs84_sr)
# Set the hemisphere indicators.
if wgs84_pt.firstPoint.X > 0:
east_or_west = 'E'
else:
east_or_west = 'W'
if wgs84_pt.firstPoint.Y < 0:
south_or_north = 'S'
else:
south_or_north = 'N'
# Get the lat/long values in the required format.
x_dms = dd_to_dms(wgs84_pt.firstPoint.X)
y_dms = dd_to_dms(wgs84_pt.firstPoint.Y)
add_to_clip_board("""{} {} {}{} {} {} {}{}""".format(x_dms[0], x_dms[1], x_dms[2], east_or_west, y_dms[0], y_dms[1], y_dms[2], south_or_north))
# Our new wrapped versions of os.startfile and webbrowser.open
startfile = run_in_other_thread(os.startfile)
open_browser = run_in_other_thread(webbrowser.open)
open_browser("www.maps.google.com")
|
prculley/gramps
|
refs/heads/master
|
gramps/gen/lib/test/date_test.py
|
8
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007 Donald N. Allingham
# Copyright (C) 2013-2014 Vassilii Khachaturov
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
""" Unittest for testing dates """
#-------------------------------------------------------------------------
#
# Standard python modules
#
#-------------------------------------------------------------------------
import unittest
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ...config import config
from ...datehandler import get_date_formats, set_format
from ...datehandler import parser as _dp
from ...datehandler import displayer as _dd
from ...datehandler._datedisplay import DateDisplayEn
from ...lib.date import Date, DateError, Today, calendar_has_fixed_newyear
date_tests = {}
# first the "basics".
testset = "basic test"
dates = []
calendar = Date.CAL_GREGORIAN
for quality in (Date.QUAL_NONE, Date.QUAL_ESTIMATED, Date.QUAL_CALCULATED):
for modifier in (Date.MOD_NONE, Date.MOD_BEFORE, Date.MOD_AFTER, Date.MOD_ABOUT):
for month in range(1,13):
d = Date()
d.set(quality,modifier,calendar,(4,month,1789,False),"Text comment")
dates.append( d)
for modifier in (Date.MOD_RANGE, Date.MOD_SPAN):
for month1 in range(1,13):
for month2 in range(1,13):
d = Date()
d.set(quality,modifier,calendar,(4,month1,1789,False,5,month2,1876,False),"Text comment")
dates.append( d)
modifier = Date.MOD_TEXTONLY
d = Date()
d.set(quality,modifier,calendar,Date.EMPTY,"This is a textual date")
dates.append( d)
date_tests[testset] = dates
# incomplete dates (day or month missing)
testset = "partial date"
dates = []
calendar = Date.CAL_GREGORIAN
for quality in (Date.QUAL_NONE, Date.QUAL_ESTIMATED, Date.QUAL_CALCULATED):
for modifier in (Date.MOD_NONE, Date.MOD_BEFORE, Date.MOD_AFTER, Date.MOD_ABOUT):
d = Date()
d.set(quality,modifier,calendar,(0,11,1789,False),"Text comment")
dates.append( d)
d = Date()
d.set(quality,modifier,calendar,(0,0,1789,False),"Text comment")
dates.append( d)
for modifier in (Date.MOD_RANGE, Date.MOD_SPAN):
d = Date()
d.set(quality,modifier,calendar,(4,10,1789,False,0,11,1876,False),"Text comment")
dates.append( d)
d = Date()
d.set(quality,modifier,calendar,(4,10,1789,False,0,0,1876,False),"Text comment")
dates.append( d)
d = Date()
d.set(quality,modifier,calendar,(0,10,1789,False,5,11,1876,False),"Text comment")
dates.append( d)
d = Date()
d.set(quality,modifier,calendar,(0,10,1789,False,0,11,1876,False),"Text comment")
dates.append( d)
d = Date()
d.set(quality,modifier,calendar,(0,10,1789,False,0,0,1876,False),"Text comment")
dates.append( d)
d = Date()
d.set(quality,modifier,calendar,(0,0,1789,False,5,11,1876,False),"Text comment")
dates.append( d)
d = Date()
d.set(quality,modifier,calendar,(0,0,1789,False,0,11,1876,False),"Text comment")
dates.append( d)
d = Date()
d.set(quality,modifier,calendar,(0,0,1789,False,0,0,1876,False),"Text comment")
dates.append( d)
date_tests[testset] = dates
# slash-dates
testset = "slash-dates"
dates = []
calendar = Date.CAL_GREGORIAN
for quality in (Date.QUAL_NONE, Date.QUAL_ESTIMATED, Date.QUAL_CALCULATED):
for modifier in (Date.MOD_NONE, Date.MOD_BEFORE, Date.MOD_AFTER, Date.MOD_ABOUT):
# normal date
d = Date()
d.set(quality,modifier,calendar,(4,11,1789,True),"Text comment")
dates.append( d)
for modifier in (Date.MOD_RANGE, Date.MOD_SPAN):
d = Date()
d.set(quality,modifier,calendar,(4,11,1789,True,5,10,1876,False),"Text comment")
dates.append( d)
d = Date()
d.set(quality,modifier,calendar,(4,11,1789,False,5,10,1876,True),"Text comment")
dates.append( d)
d = Date()
d.set(quality,modifier,calendar,(4,11,1789,True,5,10,1876,True),"Text comment")
dates.append( d)
date_tests[testset] = dates
# BCE
testset = "B. C. E."
dates = []
calendar = Date.CAL_GREGORIAN
for quality in (Date.QUAL_NONE, Date.QUAL_ESTIMATED, Date.QUAL_CALCULATED):
for modifier in (Date.MOD_NONE, Date.MOD_BEFORE, Date.MOD_AFTER, Date.MOD_ABOUT):
# normal date
d = Date()
d.set(quality,modifier,calendar,(4,11,-90,False),"Text comment")
dates.append( d)
for modifier in (Date.MOD_RANGE, Date.MOD_SPAN):
d = Date()
d.set(quality,modifier,calendar,(5,10,-90,False,4,11,-90,False),"Text comment")
dates.append( d)
date_tests[testset] = dates
# test for all other different calendars
testset = "Non-gregorian"
dates = []
for calendar in (Date.CAL_JULIAN,
Date.CAL_HEBREW,
Date.CAL_ISLAMIC,
Date.CAL_FRENCH,
Date.CAL_PERSIAN,
):
for quality in (Date.QUAL_NONE, Date.QUAL_ESTIMATED, Date.QUAL_CALCULATED):
for modifier in (Date.MOD_NONE, Date.MOD_BEFORE, Date.MOD_AFTER, Date.MOD_ABOUT):
d = Date()
d.set(quality,modifier,calendar,(4,11,1789,False),"Text comment")
dates.append( d)
for modifier in (Date.MOD_RANGE, Date.MOD_SPAN):
d = Date()
d.set(quality,modifier,calendar,(4,10,1789,False,5,11,1876,False),"Text comment")
dates.append( d)
# CAL_SWEDISH - Swedish calendar 1700-03-01 -> 1712-02-30!
class Context:
def __init__(self, retval):
self.retval = retval
def __enter__(self):
return self.retval
def __exit__(self, *args, **kwargs):
pass
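# Context is a no-op context manager used only to scope the 'calendar'
# variable for readability: 'with Context(Date.CAL_SWEDISH) as calendar:'
# is equivalent to a plain assignment.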
with Context(Date.CAL_SWEDISH) as calendar:
for quality in (Date.QUAL_NONE, Date.QUAL_ESTIMATED, Date.QUAL_CALCULATED):
for modifier in (Date.MOD_NONE, Date.MOD_BEFORE, Date.MOD_AFTER, Date.MOD_ABOUT):
d = Date()
d.set(quality,modifier,calendar,(4,11,1700,False),"Text comment")
dates.append( d)
for modifier in (Date.MOD_RANGE, Date.MOD_SPAN):
d = Date()
d.set(quality,modifier,calendar,(4,10,1701,False,
5,11,1702,False),"Text comment")
dates.append( d)
quality = Date.QUAL_NONE
modifier = Date.MOD_NONE
for calendar in (Date.CAL_JULIAN,
Date.CAL_ISLAMIC,
Date.CAL_PERSIAN,
):
for month in range(1,13):
d = Date()
d.set(quality,modifier,calendar,(4,month,1789,False),"Text comment")
dates.append( d)
for calendar in (Date.CAL_HEBREW, Date.CAL_FRENCH):
for month in range(1,14):
d = Date()
d.set(quality,modifier,calendar,(4,month,1789,False),"Text comment")
dates.append( d)
date_tests[testset] = dates
swedish_dates = []
# CAL_SWEDISH - Swedish calendar 1700-03-01 -> 1712-02-30!
with Context(Date.CAL_SWEDISH) as calendar:
for year in range(1701, 1712):
for month in range(1,13):
d = Date()
d.set(quality,modifier,calendar,(4,month,year,False),"Text comment")
swedish_dates.append( d)
#-------------------------------------------------------------------------
#
# BaseDateTest
#
#-------------------------------------------------------------------------
class BaseDateTest(unittest.TestCase):
"""
Base class for all date tests.
"""
def setUp(self):
config.set('behavior.date-before-range', 9999)
config.set('behavior.date-after-range', 9999)
config.set('behavior.date-about-range', 10)
#-------------------------------------------------------------------------
#
# ParserDateTest
#
#-------------------------------------------------------------------------
class ParserDateTest(BaseDateTest):
"""
Date displayer and parser tests.
"""
def do_case(self, testset):
for date_format in range(len(get_date_formats())):
set_format(date_format)
for dateval in date_tests[testset]:
datestr = _dd.display(dateval)
ndate = _dp.parse(datestr)
self.assertTrue(dateval.is_equal(ndate),
"dateval fails is_equal in format %d:\n"
" '%s' != '%s'\n"
" '%s' != '%s'\n" %
(date_format, dateval, ndate,
dateval.__dict__, ndate.__dict__))
def test_basic(self):
self.do_case("basic test")
def test_partial(self):
self.do_case("partial date")
def test_slash(self):
self.do_case("slash-dates")
def test_bce(self):
self.do_case("B. C. E.")
def test_non_gregorian(self):
self.do_case("Non-gregorian")
#-------------------------------------------------------------------------
#
# MatchDateTest
#
#-------------------------------------------------------------------------
ENGLISH_DATE_HANDLER = (_dd.__class__ == DateDisplayEn)
@unittest.skipUnless(ENGLISH_DATE_HANDLER,
"This test of Date() matching logic can only run in English locale.")
class MatchDateTest(BaseDateTest):
"""
Date match tests.
"""
tests = [("before 1960", "before 1961", True),
("before 1960", "before 1960", True),
("before 1961", "before 1961", True),
("jan 1, 1960", "jan 1, 1960", True),
("dec 31, 1959", "dec 31, 1959", True),
("before 1960", "jan 1, 1960", False),
("before 1960", "dec 31, 1959", True),
("abt 1960", "1960", True),
("abt 1960", "before 1960", True),
("1960", "1960", True),
("1960", "after 1960", False),
("1960", "before 1960", False),
("abt 1960", "abt 1960", True),
("before 1960", "after 1960", False),
("after jan 1, 1900", "jan 2, 1900", True),
("abt jan 1, 1900", "jan 1, 1900", True),
("from 1950 to 1955", "1950", True),
("from 1950 to 1955", "1951", True),
("from 1950 to 1955", "1952", True),
("from 1950 to 1955", "1953", True),
("from 1950 to 1955", "1954", True),
("from 1950 to 1955", "1955", True),
("from 1950 to 1955", "1956", False),
("from 1950 to 1955", "dec 31, 1955", True),
("from 1950 to 1955", "jan 1, 1955", True),
("from 1950 to 1955", "dec 31, 1949", False),
("from 1950 to 1955", "jan 1, 1956", False),
("after jul 4, 1980", "jul 4, 1980", False),
("after jul 4, 1980", "before jul 4, 1980", False),
("after jul 4, 1980", "about jul 4, 1980", True),
("after jul 4, 1980", "after jul 4, 1980", True),
("between 1750 and 1752", "1750", True),
("between 1750 and 1752", "about 1750", True),
("between 1750 and 1752", "between 1749 and 1750", True),
("between 1750 and 1752", "1749", False),
("invalid date", "invalid date", True),
("invalid date", "invalid", False, True),
("invalid date 1", "invalid date 2", False),
("abt jan 1, 2000", "dec 31, 1999", True),
("jan 1, 2000", "dec 31, 1999", False),
("aft jan 1, 2000", "dec 31, 1999", False),
("after jan 1, 2000", "after dec 31, 1999", True),
("after dec 31, 1999", "after jan 1, 2000", True),
("1 31, 2000", "jan 1, 2000", False),
("dec 31, 1999", "jan 1, 2000", False),
("jan 1, 2000", "before dec 31, 1999", False),
("aft jan 1, 2000", "before dec 31, 1999", False),
("before jan 1, 2000", "after dec 31, 1999", False),
("jan 1, 2000/1", "jan 1, 2000", False),
("jan 1, 2000/1", "jan 1, 2001", False),
("jan 1, 2000/1", "jan 1, 2000/1", True),
("jan 1, 2000/1", "jan 14, 2001", True),
("jan 1, 2000/1", "jan 1, 2001 (julian)", True),
("about 1984", "about 2005", False),
("about 1990", "about 2005", True),
("about 2007", "about 2006", True),
("about 1995", "after 2000", True),
("about 1995", "after 2005", False),
("about 2007", "about 2003", True),
("before 2007", "2000", True),
# offsets
# different calendar, same date
("1800-8-3", "15 Thermidor 8 (French Republican)", True),
("after 1800-8-3", "before 15 Thermidor 8 (French Republican)", False),
("ab cd", "54 ab cd 2000", True, False),
("1700-02-29 (Julian)", "1700-03-01 (Swedish)", True),
("1706-12-31 (Julian)", "1707-01-01 (Swedish)", True),
("1712-02-28 (Julian)", "1712-02-29 (Swedish)", True),
("1712-02-29 (Julian)", "1712-02-30 (Swedish)", True),
# See bug# 7100
("1233-12-01", "1234-12-01 (Mar25)", True),
("1234-01-04", "1234-01-04 (Mar25)", True),
# See bug# 7158
# Some issues passing Travis close to midnight; not sure why:
# ("today", Today(), True),
# ("today (Hebrew)", Today(), True),
("today", "today", True),
(Today(), Today(), True),
# See bug# 7197
("1788-03-27", "1789-03-27 (Mar25)", True),
("1788-03-27 (Julian)", "1789-03-27 (Julian, Mar25)", True),
]
def convert_to_date(self, d):
return d if isinstance(d,Date) else _dp.parse(d)
def do_case(self, d1, d2, expected1, expected2=None):
"""
Tests two Gramps dates to see if they match.
"""
if expected2 is None:
expected2 = expected1
self.assertMatch(d1, d2, expected1)
self.assertMatch(d2, d1, expected2)
def assertMatch(self, d1, d2, expected):
date1 = self.convert_to_date(d1)
date2 = self.convert_to_date(d2)
result = date2.match(date1)
self.assertEqual(result, expected,
"'{}' {} '{}'\n({} vs {})".format(
d1,
("did not match" if expected else "matched"),
d2,
date1.__dict__, date2.__dict__))
def test_match(self):
for testdata in self.tests:
self.do_case(*testdata)
#-------------------------------------------------------------------------
#
# ArithmeticDateTest
#
#-------------------------------------------------------------------------
class ArithmeticDateTest(BaseDateTest):
"""
Date arithmetic tests.
"""
tests = [
# Date +/- int/tuple -> Date
("Date(2008, 1, 1) - 1", "Date(2007, 1, 1)"),
("Date(2008, 1, 1) + 1", "Date(2009, 1, 1)"),
("Date(2008, 1, 1) - (0,0,1)", "Date(2007, 12, 31)"),
("Date(2008, 1, 1) - (0,0,2)", "Date(2007, 12, 30)"),
("Date(2008) - (0,0,1)", "Date(2007, 12, 31)"),
("Date(2008) - 1", "Date(2007, 1, 1)"),
("Date(2008, 12, 31) + (0, 0, 1)", "Date(2009, 1, 1)"),
("Date(2000,1,1) - (0,11,0)", "Date(1999, 2, 1)"),
("Date(2000,1,1) - (0,1,0)", "Date(1999, 12, 1)"),
("Date(2008, 1, 1) + (0, 0, 32)", "Date(2008, 2, 2)"),
("Date(2008, 2, 1) + (0, 0, 32)", "Date(2008, 3, 4)"),
("Date(2000) - (0, 1, 0)", "Date(1999, 12, 1)"),
("Date(2000) + (0, 1, 0)", "Date(2000, 1, 0)"), # Ok?
("Date(2000, 1, 1) - (0, 1, 0)", "Date(1999, 12, 1)"),
("Date(2000, 1, 1) - 1", "Date(1999, 1, 1)"),
("Date(2000) - 1", "Date(1999)"),
("Date(2000) + 1", "Date(2001)"),
# Date +/- Date -> Span
("(Date(1876,5,7) - Date(1876,5,1)).tuple()", "(0, 0, 6)"),
("(Date(1876,5,7) - Date(1876,4,30)).tuple()", "(0, 0, 7)"),
("(Date(2000,1,1) - Date(1999,2,1)).tuple()", "(0, 11, 0)"),
("(Date(2000,1,1) - Date(1999,12,1)).tuple()", "(0, 1, 0)"),
("(Date(2007, 12, 23) - Date(1963, 12, 4)).tuple()", "(44, 0, 19)"),
("(Date(1963, 12, 4) - Date(2007, 12, 23)).tuple()", "(-44, 0, -19)"),
]
def test_evaluate(self):
for exp1, exp2 in self.tests:
val1 = eval(exp1)
val2 = eval(exp2)
self.assertEqual(val1, val2,
"'%s' should be '%s' but was '%s'" % (exp1, val2, val1))
#-------------------------------------------------------------------------
#
# SwedishDateTest
#
#-------------------------------------------------------------------------
class SwedishDateTest(BaseDateTest):
"""
Swedish calendar tests.
"""
def test_swedish(self):
for date in swedish_dates:
self.assertEqual(date.sortval,
date.to_calendar('gregorian').sortval)
class Test_set2(BaseDateTest):
"""
Test the Date.set2_... setters -- the ones to manipulate the 2nd date
of a compound date
"""
def setUp(self):
self.date = d = Date()
d.set(modifier=Date.MOD_RANGE,
#d m y sl--d m y sl
value=(1, 1, 2000, 0, 1, 1, 2010, 0))
def testStartStopSanity(self):
start,stop = self.date.get_start_stop_range()
self.assertEqual(start, (2000, 1, 1))
self.assertEqual(stop, (2010, 1, 1))
def test_set2_ymd_overrides_stop_date(self):
self.date.set2_yr_mon_day(2013, 2, 2)
start,stop = self.date.get_start_stop_range()
self.assertEqual(start, (2000, 1, 1))
self.assertEqual(stop, (2013, 2, 2))
def test_set_ymd_overrides_both_dates(self):
self.date.set_yr_mon_day(2013, 2, 2, remove_stop_date = True)
start,stop = self.date.get_start_stop_range()
self.assertEqual(start, stop)
self.assertEqual(stop, (2013, 2, 2))
def test_set_ymd_offset_updates_both_ends(self):
self.date.set_yr_mon_day_offset(+2, +2, +2)
start,stop = self.date.get_start_stop_range()
self.assertEqual(start, (2002, 3, 3))
self.assertEqual(stop, (2012, 3, 3))
def test_set2_ymd_offset_updates_stop_date(self):
self.date.set2_yr_mon_day_offset(+7, +5, +5)
start,stop = self.date.get_start_stop_range()
self.assertEqual(start, (2000, 1, 1))
self.assertEqual(stop, (2017, 6, 6))
def test_copy_offset_ymd_preserves_orig(self):
copied = self.date.copy_offset_ymd(year=-1)
self.testStartStopSanity()
start,stop = copied.get_start_stop_range()
self.assertEqual(start, (1999, 1, 1))
self.assertEqual(stop, (2009, 1, 1))
def test_copy_ymd_preserves_orig(self):
copied = self.date.copy_ymd(year=1000, month=10, day=10,
remove_stop_date=True)
self.testStartStopSanity()
start,stop = copied.get_start_stop_range()
self.assertEqual(start, (1000, 10, 10))
self.assertEqual(stop, (1000, 10, 10))
def _test_set2_function_raises_error_unless_compound(self, function):
for mod in (Date.MOD_NONE, Date.MOD_BEFORE, Date.MOD_AFTER,
Date.MOD_ABOUT,
Date.MOD_TEXTONLY):
self.date.set_modifier(mod)
try:
function(self.date)
self.assertTrue(False,
"Modifier: {}, dateval: {} - exception expected!".format(
mod, self.date.dateval))
except DateError:
pass
def test_set2_ymd_raises_error_unless_compound(self):
self._test_set2_function_raises_error_unless_compound(
lambda date: date.set2_yr_mon_day(2013, 2, 2))
def test_set2_ymd_offset_raises_error_unless_compound(self):
self._test_set2_function_raises_error_unless_compound(
lambda date: date.set2_yr_mon_day_offset(year=-1))
class Test_set_newyear(BaseDateTest):
def test_raises_error_iff_calendar_has_fixed_newyear(self):
for cal in Date.CALENDARS:
d = Date(1111,2,3)
should_raise = calendar_has_fixed_newyear(cal)
message = "{name} {cal}".format(
name = Date.calendar_names[cal],
cal = cal)
try:
d.set(calendar=cal, newyear=2)
self.assertFalse(should_raise, message)
except DateError:
self.assertTrue(should_raise, message)
#-------------------------------------------------------------------------
#
# EmptyDateTest
#
#-------------------------------------------------------------------------
class EmptyDateTest(BaseDateTest):
"""
Tests for empty dates.
"""
def test_empty(self):
d = Date()
self.assertTrue(d.is_empty())
def test_text_only_empty(self):
d = Date()
d.set(text='First of Jan',
modifier=Date.MOD_TEXTONLY)
self.assertFalse(d.is_empty())
def test_single_empty(self):
d = Date()
d.set(value=(1, 1, 1900, False),
modifier=Date.MOD_NONE)
self.assertFalse(d.is_empty())
def test_range_empty(self):
d = Date()
d.set(value=(1, 1, 1900, False, 1, 1, 1910, False),
modifier=Date.MOD_RANGE)
self.assertFalse(d.is_empty())
def test_span_empty(self):
d = Date()
d.set(value=(1, 1, 1900, False, 1, 1, 1910, False),
modifier=Date.MOD_SPAN)
self.assertFalse(d.is_empty())
if __name__ == "__main__":
unittest.main()
|
jsr38/necpp
|
refs/heads/master
|
testharness/python/test_get_gain.py
|
2
|
from necpp import *
import unittest
class TestDipoleGain(unittest.TestCase):
def handle_nec(self, result):
if (result != 0):
print(nec_error_message())
self.assertEqual(result,0)
def test_example4(self):
'''
CEEXAMPLE 4. T ANTENNA ON A BOX OVER PERFECT GROUND
SP 0 0 .1 .05 .05 0. 0. .01
SP 0 0 .05 .1 .05 0. 90. .01
GX 0 110
SP 0 0 0. 0. .1 90. 0. .04
GW 1 4 0. 0. .1 0. 0. .3 .001
GW 2 2 0. 0. .3 .15 0. .3 .001
GW 3 2 0. 0. .3 -.15 0. .3 .001
GE 1
GN 1
EX 0 1 1 0 1.
RP 0 10 4 1001 0. 0. 10. 30.
EN
'''
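# Each card in the deck above maps to one call below (illustrative mapping):
# SP -> nec_sp_card, GX -> nec_gx_card, GW -> nec_wire,
# GE -> nec_geometry_complete, GN -> nec_gn_card, EX -> nec_ex_card,
# RP -> nec_rp_card.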
nec = nec_create()
self.handle_nec(nec_sp_card(nec, 0, 0.1, 0.05, 0.05, 0.0, 0.0, 0.01))
self.handle_nec(nec_sp_card(nec, 0, .05, .1, .05, 0.0, 90.0, 0.01))
self.handle_nec(nec_gx_card(nec, 0, 110))
self.handle_nec(nec_sp_card(nec, 0, 0.0, 0.0, 0.1, 90.0, 0.0, 0.04))
self.handle_nec(nec_wire(nec, 1, 4, 0., 0.0, 0.1, 0.0, 0.0, 0.3, .001, 1.0, 1.0))
self.handle_nec(nec_wire(nec, 2, 2, 0., 0.0, 0.3, 0.15, 0.0, 0.3, .001, 1.0, 1.0))
self.handle_nec(nec_wire(nec, 3, 2, 0., 0.0, 0.3, -.15, 0.0, 0.3, .001, 1.0, 1.0))
self.handle_nec(nec_geometry_complete(nec, 1))
self.handle_nec(nec_gn_card(nec, 1, 0, 0, 0, 0, 0, 0, 0))
self.handle_nec(nec_ex_card(nec, 0, 1, 1, 0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0))
self.handle_nec(nec_rp_card(nec, 0,10,4,1,0,0,1,0.0,0.0,10.0,30.0, 0, 0))
self.assertAlmostEqual(nec_gain_max(nec,0),5.076,3)
gmax = -999.0
for theta_index in range(0,10):
for phi_index in range(0,4):
g = nec_gain(nec,0,theta_index, phi_index)
gmax = max(g, gmax)
self.assertAlmostEqual(gmax, nec_gain_max(nec,0), 5 )
nec_delete(nec)
if __name__ == '__main__':
unittest.main()
|
zackmdavis/Finetooth
|
refs/heads/master
|
core/colorize.py
|
1
|
import itertools
from typing import Dict, List, Union
def diffract(hex_encoding: str) -> List[int]:
return [int(band, 16) for band in (hex_encoding[i:i+2] for i in (0, 2, 4))]
def undiffract(rgb: Union[List[int], List[float]]) -> str:
return "".join(hex(int(band))[2:].zfill(2) for band in rgb)
def interpolate(rgb1: List[int], rgb2: List[int], weighting: float) -> List[float]:
return list(map(lambda c1, c2: c1 + weighting*(c2 - c1), rgb1, rgb2))
def interpolate_stop(color_stops: Dict[int, str], x: int) -> str:
stops = sorted(color_stops.keys())
closest_above = min(stop for stop in stops if (stop - x) > 0)
closest_below = max(stop for stop in stops if (stop - x) < 0)
diffracted_above, diffracted_below = [
diffract(color_stops[s]) for s in (closest_above, closest_below)
]
weighting = (x - closest_below) / (closest_above - closest_below)
return undiffract(
interpolate(diffracted_below, diffracted_above, weighting)
)
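# Worked example (illustrative): with color_stops {0: "000000", 2: "ff0000"},
# interpolate_stop(color_stops, 1) computes weighting 0.5 and returns "7f0000".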
def populate_stops(color_stops: Dict[int, str]) -> Dict[int, str]:
full_stops = color_stops.copy()
stops = color_stops.keys()
heroism = max(stops)
villainy = min(stops)
for moral_quality in range(villainy, heroism + 1):
if moral_quality not in color_stops:
full_stops[moral_quality] = interpolate_stop(
color_stops, moral_quality
)
return full_stops
def style_block(data_attribute, style_property, state, color):
return "\n".join(["[data-{}=\"{}\"] {{".format(data_attribute, state),
" {}: #{};".format(style_property, color),
"}\n"])
def value_style_block(value, color):
return style_block("value", "color", value, color)
def mark_style_block(mark, color):
return style_block("mark", "background-color", mark, color)
def stylesheet(low_score, low_color, high_score, high_color):
stops = {0: "000000"}
if low_score < 0:
stops.update({low_score: low_color})
if high_score > 0:
stops.update({high_score: high_color})
colors = populate_stops(stops)
return "\n".join(
itertools.chain(
(value_style_block(value, color)
for value, color in colors.items()),
(mark_style_block(mark, color)
for mark, color in ((-1, "FFD6D6"), (1, "D6D6FF")))
)
)
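# Example (illustrative): stylesheet(-2, "ff0000", 2, "00ff00") emits one
# [data-value=...] color rule per integer score in [-2, 2] plus the two fixed
# [data-mark=...] background rules.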
|
JayVora-SerpentCS/connector-redmine
|
refs/heads/8.0
|
unit/__init__.py
|
1
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2015 - Present Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import backend_adapter
from . import binder
from . import import_synchronizer
from . import mapper
|
SeanMcGrath/ScopeOut
|
refs/heads/master
|
scopeout/utilities.py
|
1
|
"""
Scope Finder
=================
Polls VISA resources (USB/serial) to find compatible oscilloscopes and
returns them as Oscilloscope objects.
"""
import logging
import re
from visa import ResourceManager, VisaIOError
from scopeout import oscilloscopes
class ScopeFinder:
def __init__(self):
"""
Constructor
"""
self.logger = logging.getLogger('scopeout.utilities.ScopeFinder')
self.logger.info('ScopeFinder Initialized')
self.resource_manager = ResourceManager()
self.resources = []
self.instruments = []
self.scopes = []
self.blacklist = set()
self.refresh()
def __enter__(self):
# Entry point for the *with* statement, which allows this object to close properly on program exit.
return self
def __exit__(self, type, value, traceback):
# Exit point for with statement
pass
def query(self, inst, command):
"""
Issues query to instrument and returns response.
Parameters:
:inst: the instrument to be queried.
:command: the command to be issued.
:Returns: the response of inst as a string.
"""
return inst.query(command).strip() # strip newline
def get_scopes(self):
"""
Getter for array of connected oscilloscopes.
:Returns: an array of PyVisa instrument objects representing USB oscilloscopes connected to the computer.
"""
return self.scopes
def refresh(self):
"""
Re-run scope acquisition to update scope array.
:Returns: the ScopeFinder object, for convenience.
"""
self.scopes = []
self.resources = []
try:
self.resources = self.resource_manager.list_resources()
except VisaIOError as e:
self.resources = []
if self.resources:
self.logger.info("%d VISA Resource(s) found", len(self.resources))
self.instruments = []
for resource in set(self.resources) - self.blacklist:
try:
inst = self.resource_manager.open_resource(resource)
self.instruments.append(inst)
self.logger.info('Resource {} converted to instrument'.format(resource))
except Exception as e:
self.logger.error(e)
self.blacklist.add(resource)
for ins in self.instruments:
try:
info = self.query(ins, '*IDN?').split(',') # Parse identification string
if info[1] == 'TDS 2024B': # TDS 2024B oscilloscope
info.append(info.pop().split()[1][3:]) # get our identification string into array format
scope = oscilloscopes.TDS2024B(ins, info[1], info[2], info[3])
self.scopes.append(scope)
self.logger.info("Found %s", str(scope))
elif re.match('GDS-1.*A', info[1]):
scope = oscilloscopes.GDS1000A(ins, info[1], info[2], info[3])
self.scopes.append(scope)
self.logger.info("Found %s", str(scope))
elif re.match('GDS-2.*A', info[1]):
scope = oscilloscopes.GDS2000A(ins, info[1], info[2], info[3])
self.scopes.append(scope)
self.logger.info("Found %s", str(scope))
# Support for other scopes to be implemented here!
except (VisaIOError, IndexError):
self.logger.error('{} could not be converted to an oscilloscope'.format(ins))
return self
def check_scope(self, scope_index):
"""
Check if the scope at scope_index is still connected.
Parameters:
:scope_index: the index of the scopes array to check.
:Returns: True if connected, False otherwise.
"""
try:
if self.scopes[scope_index].getTriggerStatus():
return True
else:
return False
except Exception:
return False
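# Typical usage (illustrative sketch):
#   with ScopeFinder() as finder:
#       for scope in finder.get_scopes():
#           print(scope)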
|
jason-z-hang/airflow
|
refs/heads/master
|
setup.py
|
1
|
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
# Kept manually in sync with airflow.__version__
version = '1.5.2'
class Tox(TestCommand):
user_options = [('tox-args=', None, "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = ''
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# Import here because outside the eggs aren't loaded
import tox
errno = tox.cmdline(args=self.tox_args.split())
sys.exit(errno)
celery = [
'celery>=3.1.17',
'flower>=0.7.3'
]
crypto = ['cryptography>=0.9.3']
doc = [
'sphinx>=1.2.3',
'sphinx-argparse>=0.1.13',
'sphinx-rtd-theme>=0.1.6',
'Sphinx-PyPI-upload>=0.2.1'
]
druid = ['pydruid>=0.2.1']
hdfs = ['snakebite>=2.4.13']
hive = [
'hive-thrift-py>=0.0.1',
'pyhive>=0.1.3',
'pyhs2>=0.6.0',
]
jdbc = ['jaydebeapi>=0.2.0']
mssql = ['pymssql>=2.1.1', 'unicodecsv>=0.13.0']
mysql = ['mysqlclient>=1.3.6']
optional = ['librabbitmq>=1.6.1']
oracle = ['cx_Oracle>=5.1.2']
postgres = ['psycopg2>=2.6']
s3 = ['boto>=2.36.0']
samba = ['pysmbclient>=0.1.3']
slack = ['slackclient>=0.15']
statsd = ['statsd>=3.0.1, <4.0']
vertica = ['vertica-python>=0.5.1']
all_dbs = postgres + mysql + hive + mssql + hdfs + vertica
devel = all_dbs + doc + samba + s3 + ['nose'] + slack + crypto + oracle
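# Each list above maps to a pip "extra" via extras_require below, e.g.
# (illustrative): pip install airflow[postgres,celery]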
setup(
name='airflow',
description='Programmatically author, schedule and monitor data pipelines',
version=version,
packages=find_packages(),
package_data={'': ['airflow/alembic.ini']},
include_package_data=True,
zip_safe=False,
scripts=['airflow/bin/airflow'],
install_requires=[
'alembic>=0.8.0, <0.9',
'chartkick>=0.4.2, < 0.5',
'dill>=0.2.2, <0.3',
'flask>=0.10.1, <0.11',
'flask-admin==1.2.0',
'flask-cache>=0.13.1, <0.14',
'flask-login==0.2.11',
'future>=0.15.0, <0.16',
'gunicorn>=19.3.0, <20.0',
'jinja2>=2.7.3, <3.0',
'markdown>=2.5.2, <3.0',
'pandas>=0.15.2, <1.0.0',
'pygments>=2.0.1, <3.0',
'python-dateutil>=2.3, <3',
'requests>=2.5.1, <3',
'setproctitle>=1.1.8, <2',
'sqlalchemy>=0.9.8',
'thrift>=0.9.2, <0.10',
],
extras_require={
'all': devel + optional,
'all_dbs': all_dbs,
'celery': celery,
'crypto': crypto,
'devel': devel,
'doc': doc,
'druid': druid,
'hdfs': hdfs,
'hive': hive,
'jdbc': jdbc,
'mssql': mssql,
'mysql': mysql,
'oracle': oracle,
'postgres': postgres,
's3': s3,
'samba': samba,
'slack': slack,
'statsd': statsd,
'vertica': vertica,
},
author='Maxime Beauchemin',
author_email='maximebeauchemin@gmail.com',
url='https://github.com/airbnb/airflow',
download_url=(
'https://github.com/airbnb/airflow/tarball/' + version),
cmdclass={'test': Tox},
)
|
suncycheng/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyArgumentListInspection/badarglist.py
|
7
|
# bad argument list samples
class A:
def foo(self, x, y):
pass
# no self, but so what
def bar(one, two):
pass
a = A()
a.foo(1,2)
a.bar(<warning descr="Parameter 'two' unfilled">)</warning>;
def f1():
pass
f1()
f1<warning descr="Unexpected argument(s)">(<warning descr="Unexpected argument">1</warning>)</warning>
f1<warning descr="Unexpected argument(s)">(<warning descr="Unexpected argument">a = 1</warning>)</warning>
def f2(a):
pass
f2(<warning descr="Parameter 'a' unfilled">)</warning> # ok, fail
f2(1) # ok, pass
f2<warning descr="Unexpected argument(s)">(1, <warning descr="Unexpected argument">2</warning>)</warning> # ok, fail
f2(a = 1) # ok, pass
f2(<warning descr="Unexpected argument">b = 1</warning><warning descr="Parameter 'a' unfilled">)</warning> # ok, fail
f2<warning descr="Unexpected argument(s)">(a = 1, <warning descr="Unexpected argument">b = 2</warning>)</warning> # ok, fail
def f3(a, b):
pass
f3(1, 2)
f3<warning descr="Unexpected argument(s)">(1, 2, <warning descr="Unexpected argument">3</warning>)</warning>
f3(b=2, a=1)
f3<warning descr="Unexpected argument(s)">(b=1, <error descr="Keyword argument repeated">b=2</error>, a=1)</warning>
f3(1, b=2)
f3(a=1, <error descr="Positional argument after keyword argument">2</error><warning descr="Parameter 'b' unfilled">)</warning>
def f4(a, *b):
pass
f4(1)
f4(1, 2)
f4(1, 2, 3)
f4(1, *(2, 3))
f4(*(1,2,3))
f4(a=1, <error descr="Positional argument after keyword argument">2</error>, <error descr="Positional argument after keyword argument">3</error>)
|
ace8957/SeniorDesignKernel
|
refs/heads/spitest
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
|
4653
|
# EventClass.py
#
# This is a library defining some event type classes, which could
# be used by other scripts to analyze the perf samples.
#
# Currently there are just a few classes defined as examples:
# PerfEvent is the base class for all perf event samples, PebsEvent
# is a HW-based Intel x86 PEBS event, and users could add more SW/HW
# event classes based on their requirements.
import struct
# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1 # Basic PEBS event
EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
EVTYPE_IBS = 3
#
# Currently we don't have a good way to tell the event type other than
# by the size of the raw buffer: a raw PEBS event with load latency data
# is 176 bytes, while a pure PEBS event is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
if (len(raw_buf) == 144):
event = PebsEvent(name, comm, dso, symbol, raw_buf)
elif (len(raw_buf) == 176):
event = PebsNHM(name, comm, dso, symbol, raw_buf)
else:
event = PerfEvent(name, comm, dso, symbol, raw_buf)
return event
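# Illustrative use (hypothetical values; in practice perf supplies these):
#   ev = create_event("cycles", "bash", "/bin/bash", "main", "\x00" * 144)
#   ev.show()   # a 144-byte raw buffer yields a PebsEvent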
class PerfEvent(object):
event_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
self.name = name
self.comm = comm
self.dso = dso
self.symbol = symbol
self.raw_buf = raw_buf
self.ev_type = ev_type
PerfEvent.event_num += 1
def show(self):
print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
pebs_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
tmp_buf=raw_buf[0:80]
flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
self.flags = flags
self.ip = ip
self.ax = ax
self.bx = bx
self.cx = cx
self.dx = dx
self.si = si
self.di = di
self.bp = bp
self.sp = sp
PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsEvent.pebs_num += 1
del tmp_buf
#
# Intel Nehalem and Westmere support PEBS plus Load Latency info, which lies
# in the four 64-bit words written after the PEBS data:
# Status: records the IA32_PERF_GLOBAL_STATUS register value
# DLA: Data Linear Address (EIP)
# DSE: Data Source Encoding, where the latency happens, hit or miss
# in L1/L2/L3 or IO operations
# LAT: the actual latency in cycles
#
class PebsNHM(PebsEvent):
pebs_nhm_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
tmp_buf=raw_buf[144:176]
status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
self.status = status
self.dla = dla
self.dse = dse
self.lat = lat
PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsNHM.pebs_nhm_num += 1
del tmp_buf
|
blrm/robottelo
|
refs/heads/master
|
tests/foreman/smoke/test_cli_smoke.py
|
2
|
"""Smoke tests for the ``CLI`` end-to-end scenario."""
import random
from fauxfactory import gen_alphanumeric, gen_ipaddr
from robottelo import manifests, ssh
from robottelo.cli.activationkey import ActivationKey
from robottelo.cli.computeresource import ComputeResource
from robottelo.cli.contentview import ContentView
from robottelo.cli.domain import Domain
from robottelo.cli.environment import Environment
from robottelo.cli.factory import (
make_user, make_org, make_lifecycle_environment, make_content_view,
make_activation_key)
from robottelo.cli.hostgroup import HostGroup
from robottelo.cli.lifecycleenvironment import LifecycleEnvironment
from robottelo.cli.location import Location
from robottelo.cli.org import Org
from robottelo.cli.product import Product
from robottelo.cli.puppetmodule import PuppetModule
from robottelo.cli.repository_set import RepositorySet
from robottelo.cli.repository import Repository
from robottelo.cli.subnet import Subnet
from robottelo.cli.subscription import Subscription
from robottelo.cli.user import User
from robottelo.constants import (
DEFAULT_LOC,
DEFAULT_ORG,
DEFAULT_SUBSCRIPTION_NAME,
FAKE_0_PUPPET_REPO,
GOOGLE_CHROME_REPO,
PRDS,
REPOS,
REPOSET,
)
from robottelo.config import settings
from robottelo.datafactory import generate_strings_list
from robottelo.decorators import skip_if_not_set
from robottelo.helpers import get_server_software
from robottelo.test import CLITestCase
from robottelo.vm import VirtualMachine
# (too many public methods) pylint: disable=R0904
class SmokeTestCase(CLITestCase):
"""End-to-end tests using the ``CLI`` path."""
def test_positive_find_default_org(self):
"""Check if 'Default Organization' is present
@Feature: Smoke Test
@Assert: 'Default Organization' is found
"""
query = {u'name': DEFAULT_ORG}
result = self._search(Org, query)
self.assertEqual(result['name'], DEFAULT_ORG)
def test_positive_find_default_loc(self):
"""Check if 'Default Location' is present
@Feature: Smoke Test
@Assert: 'Default Location' is found
"""
query = {u'name': DEFAULT_LOC}
result = self._search(Location, query)
self.assertEqual(result['name'], DEFAULT_LOC)
def test_positive_find_admin_user(self):
"""Check if Admin User is present
@Feature: Smoke Test
@Assert: Admin User is found and has Admin role
"""
query = {u'login': u'admin'}
result = self._search(User, query)
self.assertEqual(result['login'], 'admin')
self.assertEqual(result['admin'], 'yes')
def test_positive_foreman_version(self):
"""Check if /usr/share/foreman/VERSION does not contain the
develop tag.
@Feature: Smoke Test
@Assert: The file content does not have the develop tag.
"""
result = ssh.command('cat /usr/share/foreman/VERSION')
self.assertEqual(result.return_code, 0)
if get_server_software() == 'downstream':
self.assertNotIn('develop', u''.join(result.stdout))
else:
self.assertIn('develop', u''.join(result.stdout))
def test_positive_smoke(self):
"""Check that basic content can be created
* Create a new user with admin permissions
* Using the new user from above.
* Create a new organization
* Create two new lifecycle environments
* Create a custom product
* Create a custom YUM repository
* Create a custom PUPPET repository
* Synchronize both custom repositories
* Create a new content view
* Associate both repositories to new content view
* Publish content view
* Promote content view to both lifecycles
* Create a new libvirt compute resource
* Create a new subnet
* Create a new domain
* Create a new hostgroup and associate previous entities to it
@Feature: Smoke Test
@Assert: All entities are created and associated.
"""
# Create new user
password = gen_alphanumeric()
new_user = make_user({u'admin': u'true', u'password': password})
# Append the password as the info command does not return it
new_user[u'password'] = password
# Create new org as new user
new_org = self._create(
new_user,
Org,
{u'name': gen_alphanumeric()}
)
# Create new lifecycle environment 1
lifecycle1 = self._create(
new_user,
LifecycleEnvironment,
{
u'name': gen_alphanumeric(),
u'organization-id': new_org['id'],
u'prior': u'Library',
}
)
# Create new lifecycle environment 2
lifecycle2 = self._create(
new_user,
LifecycleEnvironment,
{
u'name': gen_alphanumeric(),
u'organization-id': new_org['id'],
u'prior': lifecycle1['name'],
}
)
# Create a new product
new_product = self._create(
new_user,
Product,
{
u'name': gen_alphanumeric(),
u'organization-id': new_org['id'],
}
)
# Create a YUM repository
new_repo1 = self._create(
new_user,
Repository,
{
u'content-type': u'yum',
u'name': gen_alphanumeric(),
u'product-id': new_product['id'],
u'publish-via-http': u'true',
u'url': GOOGLE_CHROME_REPO,
}
)
# Create a Puppet repository
new_repo2 = self._create(
new_user,
Repository,
{
u'content-type': u'puppet',
u'name': gen_alphanumeric(),
u'product-id': new_product['id'],
u'publish-via-http': u'true',
u'url': FAKE_0_PUPPET_REPO,
}
)
# Synchronize YUM repository
Repository.with_user(
new_user['login'],
new_user['password']
).synchronize({u'id': new_repo1['id']})
# Synchronize puppet repository
Repository.with_user(
new_user['login'],
new_user['password']
).synchronize({u'id': new_repo2['id']})
# Create a Content View
new_cv = self._create(
new_user,
ContentView,
{
u'name': gen_alphanumeric(),
u'organization-id': new_org['id'],
}
)
# Associate yum repository to content view
ContentView.with_user(
new_user['login'],
new_user['password']
).add_repository({
u'id': new_cv['id'],
u'repository-id': new_repo1['id'],
})
# Fetch puppet module
puppet_result = PuppetModule.with_user(
new_user['login'],
new_user['password']
).list({
u'repository-id': new_repo2['id'],
u'per-page': False,
})
# Associate puppet repository to content view
ContentView.with_user(
new_user['login'],
new_user['password']
).puppet_module_add({
u'content-view-id': new_cv['id'],
u'id': puppet_result[0]['id'],
})
# Publish content view
ContentView.with_user(
new_user['login'],
new_user['password']
).publish({u'id': new_cv['id']})
# Only after we publish version1 is the info populated.
result = ContentView.with_user(
new_user['login'],
new_user['password']
).info({u'id': new_cv['id']})
# Let us now store the version1 id
version1_id = result['versions'][0]['id']
# Promote content view to first lifecycle
ContentView.with_user(
new_user['login'],
new_user['password']
).version_promote({
u'id': version1_id,
u'to-lifecycle-environment-id': lifecycle1['id'],
})
# Promote content view to second lifecycle
ContentView.with_user(
new_user['login'],
new_user['password']
).version_promote({
u'id': version1_id,
u'to-lifecycle-environment-id': lifecycle2['id'],
})
# Create a new libvirt compute resource
self._create(
new_user,
ComputeResource,
{
u'name': gen_alphanumeric(),
u'provider': u'Libvirt',
u'url': u'qemu+tcp://{0}:16509/system'.format(
settings.server.hostname),
}
)
# Create a new subnet
new_subnet = self._create(
new_user,
Subnet,
{
u'name': gen_alphanumeric(),
u'network': gen_ipaddr(ip3=True),
u'mask': u'255.255.255.0',
}
)
# Create a domain
new_domain = self._create(
new_user,
Domain,
{
u'name': gen_alphanumeric(),
}
)
# Fetch Puppet environment for second lifecycle
# (unfortunately it is not straightforward to extract this)
# The puppet environment we want has a name like this...
env_name = u'KT_{0}_{1}_'.format(
# Hyphens are replaced by underscores
new_org['label'].replace('-', '_',),
lifecycle2['label'].replace('-', '_')
)
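# e.g. (illustrative) org label 'my-org' and lifecycle label 'env-2'
# produce the prefix 'KT_my_org_env_2_'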
# We fetch all the puppet environments for our organization...
result = Environment.with_user(
new_user['login'],
new_user['password']
).list({
u'search': u'organization="{0}"'.format(new_org['name']),
})
# Now look for the puppet environment that matches lifecycle2
puppet_env = [
env for env in result
if env['name'].startswith(env_name)
]
self.assertEqual(len(puppet_env), 1)
# Create a hostgroup...
new_hg = self._create(
new_user,
HostGroup,
{
u'domain-id': new_domain['id'],
u'environment-id': puppet_env[0]['id'],
u'name': gen_alphanumeric(),
u'subnet-id': new_subnet['id'],
}
)
# ...and add it to the organization
Org.with_user(
new_user['login'],
new_user['password']
).add_hostgroup({
u'hostgroup-id': new_hg['id'],
u'id': new_org['id'],
})
def _create(self, user, entity, attrs):
"""Creates a Foreman entity and returns it.
:param dict user: A python dictionary representing a User
:param obj entity: A valid CLI entity.
:param dict attrs: A python dictionary with attributes to use when
creating entity.
:return: A ``dict`` representing the Foreman entity.
:rtype: dict
"""
# Create new entity as new user
return entity.with_user(
user['login'],
user['password']
).create(attrs)
def _search(self, entity, attrs):
"""Looks up for a Foreman entity by specifying using its ``Info`` CLI
subcommand with ``attrs`` arguments.
:param robottelo.cli.Base entity: A logical representation of a
Foreman CLI entity.
:param string query: A ``search`` parameter.
:return: A ``dict`` representing the Foreman entity.
:rtype: dict
"""
return entity.info(attrs)
@skip_if_not_set('clients')
def test_positive_end_to_end(self):
"""Perform end to end smoke tests using RH repos.
1. Create new organization and environment
2. Upload manifest
3. Sync a RedHat repository
4. Create content-view
5. Add repository to content-view
6. Promote/publish content-view
7. Create an activation-key
8. Add product to activation-key
9. Create new virtual machine
10. Pull rpm from Foreman server and install on client
11. Register client with foreman server using activation-key
12. Install rpm on client
@Feature: Smoke test
@Assert: All tests should succeed and Content should be successfully
fetched by client
"""
# Product, RepoSet and repository variables
rhel_product_name = PRDS['rhel']
rhel_repo_set = REPOSET['rhva6']
rhel_repo_name = REPOS['rhva6']['name']
org_name = random.choice(generate_strings_list())
# Create new org and environment
new_org = make_org({u'name': org_name})
new_env = make_lifecycle_environment({
u'organization-id': new_org['id'],
})
# Clone manifest and upload it
with manifests.clone() as manifest:
ssh.upload_file(manifest.content, manifest.filename)
Subscription.upload({
u'file': manifest.filename,
u'organization-id': new_org['id'],
})
# Enable repo from Repository Set
RepositorySet.enable({
u'basearch': 'x86_64',
u'name': rhel_repo_set,
u'organization-id': new_org['id'],
u'product': rhel_product_name,
u'releasever': '6Server',
})
# Fetch repository info
rhel_repo = Repository.info({
u'name': rhel_repo_name,
u'organization-id': new_org['id'],
u'product': rhel_product_name,
})
# Synchronize the repository
Repository.synchronize({
u'name': rhel_repo_name,
u'organization-id': new_org['id'],
u'product': rhel_product_name,
})
# Create CV and associate repo to it
new_cv = make_content_view({u'organization-id': new_org['id']})
ContentView.add_repository({
u'id': new_cv['id'],
u'organization-id': new_org['id'],
u'repository-id': rhel_repo['id'],
})
# Publish a version1 of CV
ContentView.publish({u'id': new_cv['id']})
# Get the CV info
version1_id = ContentView.info({
u'id': new_cv['id']})['versions'][0]['id']
# Store the version1 id
# Promotion of version1 to next env
ContentView.version_promote({
u'id': version1_id,
u'to-lifecycle-environment-id': new_env['id'],
})
# Create activation key
activation_key = make_activation_key({
u'content-view': new_cv['name'],
u'lifecycle-environment-id': new_env['id'],
u'organization-id': new_org['id'],
})
# List the subscriptions in given org
result = Subscription.list(
{u'organization-id': new_org['id']},
per_page=False
)
self.assertGreater(len(result), 0)
# Get the subscription ID from subscriptions list
subscription_quantity = 0
for subscription in result:
if subscription['name'] == DEFAULT_SUBSCRIPTION_NAME:
subscription_id = subscription['id']
subscription_quantity = int(subscription['quantity'])
self.assertGreater(subscription_quantity, 0)
# Add the subscriptions to activation-key
ActivationKey.add_subscription({
u'id': activation_key['id'],
u'quantity': 1,
u'subscription-id': subscription_id,
})
# Enable product content
ActivationKey.content_override({
u'content-label': 'rhel-6-server-rhev-agent-rpms',
u'id': activation_key['id'],
u'organization-id': new_org['id'],
u'value': '1',
})
# Create VM
package_name = "python-kitchen"
server_name = settings.server.hostname
with VirtualMachine(distro='rhel66') as vm:
# Download and Install rpm
result = vm.run(
"wget -nd -r -l1 --no-parent -A '*.noarch.rpm' http://{0}/pub/"
.format(server_name)
)
self.assertEqual(result.return_code, 0)
result = vm.run(
'rpm -i katello-ca-consumer*.noarch.rpm'
)
self.assertEqual(result.return_code, 0)
# Register client with foreman server using activation-key
result = vm.run(
u'subscription-manager register --activationkey {0} '
'--org {1} --force'
.format(activation_key['name'], new_org['label'])
)
self.assertEqual(result.return_code, 0)
# Install contents from sat6 server
result = vm.run('yum install -y {0}'.format(package_name))
self.assertEqual(result.return_code, 0)
# Verify the package is installed by querying it
result = vm.run('rpm -q {0}'.format(package_name))
self.assertEqual(result.return_code, 0)
|
1013553207/django
|
refs/heads/master
|
django/db/backends/postgresql/base.py
|
143
|
"""
PostgreSQL database backend for Django.
Requires psycopg 2: http://initd.org/projects/psycopg2
"""
import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.base.validation import BaseDatabaseValidation
from django.db.utils import DatabaseError as WrappedDatabaseError
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes, SafeText
try:
import psycopg2 as Database
import psycopg2.extensions
import psycopg2.extras
except ImportError as e:
raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)
def psycopg2_version():
version = psycopg2.__version__.split(' ', 1)[0]
return tuple(int(v) for v in version.split('.') if v.isdigit())
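# e.g. (illustrative) a psycopg2.__version__ of '2.6.1 (dt dec pq3 ext lo64)'
# yields (2, 6, 1).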
PSYCOPG2_VERSION = psycopg2_version()
if PSYCOPG2_VERSION < (2, 4, 5):
raise ImproperlyConfigured("psycopg2_version 2.4.5 or newer is required; you have %s" % psycopg2.__version__)
# Some of these import psycopg2, so import them after checking if it's installed.
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
from .utils import utc_tzinfo_factory # isort:skip
from .version import get_version # isort:skip
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
psycopg2.extensions.register_adapter(SafeBytes, psycopg2.extensions.QuotedString)
psycopg2.extensions.register_adapter(SafeText, psycopg2.extensions.QuotedString)
psycopg2.extras.register_uuid()
# Register support for inet[] manually so we don't have to handle the Inet()
# object on load all the time.
INETARRAY_OID = 1041
INETARRAY = psycopg2.extensions.new_array_type(
(INETARRAY_OID,),
'INETARRAY',
psycopg2.extensions.UNICODE,
)
psycopg2.extensions.register_type(INETARRAY)
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'postgresql'
# This dictionary maps Field objects to their associated PostgreSQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
data_types = {
'AutoField': 'serial',
'BinaryField': 'bytea',
'BooleanField': 'boolean',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'timestamp with time zone',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'DurationField': 'interval',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'inet',
'GenericIPAddressField': 'inet',
'NullBooleanField': 'boolean',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer',
'PositiveSmallIntegerField': 'smallint',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
'UUIDField': 'uuid',
}
data_type_check_constraints = {
'PositiveIntegerField': '"%(column)s" >= 0',
'PositiveSmallIntegerField': '"%(column)s" >= 0',
}
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': 'LIKE %s',
'icontains': 'LIKE UPPER(%s)',
'regex': '~ %s',
'iregex': '~* %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE %s',
'endswith': 'LIKE %s',
'istartswith': 'LIKE UPPER(%s)',
'iendswith': 'LIKE UPPER(%s)',
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
pattern_ops = {
'contains': "LIKE '%%' || {} || '%%'",
'icontains': "LIKE '%%' || UPPER({}) || '%%'",
'startswith': "LIKE {} || '%%'",
'istartswith': "LIKE UPPER({}) || '%%'",
'endswith': "LIKE '%%' || {}",
'iendswith': "LIKE '%%' || UPPER({})",
}
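# e.g. (illustrative) a 'startswith' lookup against an expression renders as
# LIKE <escaped-expression> || '%', with pattern_esc applied to the expression
# so LIKE wildcards inside its value are matched literally.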
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def get_connection_params(self):
settings_dict = self.settings_dict
# None may be used to connect to the default 'postgres' db
if settings_dict['NAME'] == '':
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
conn_params = {
'database': settings_dict['NAME'] or 'postgres',
}
conn_params.update(settings_dict['OPTIONS'])
conn_params.pop('isolation_level', None)
if settings_dict['USER']:
conn_params['user'] = settings_dict['USER']
if settings_dict['PASSWORD']:
conn_params['password'] = force_str(settings_dict['PASSWORD'])
if settings_dict['HOST']:
conn_params['host'] = settings_dict['HOST']
if settings_dict['PORT']:
conn_params['port'] = settings_dict['PORT']
return conn_params
def get_new_connection(self, conn_params):
connection = Database.connect(**conn_params)
# self.isolation_level must be set:
# - after connecting to the database in order to obtain the database's
# default when no value is explicitly specified in options.
# - before calling _set_autocommit() because if autocommit is on, that
# will set connection.isolation_level to ISOLATION_LEVEL_AUTOCOMMIT.
options = self.settings_dict['OPTIONS']
try:
self.isolation_level = options['isolation_level']
except KeyError:
self.isolation_level = connection.isolation_level
else:
# Set the isolation level to the value from OPTIONS.
if self.isolation_level != connection.isolation_level:
connection.set_session(isolation_level=self.isolation_level)
return connection
def init_connection_state(self):
self.connection.set_client_encoding('UTF8')
conn_timezone_name = self.connection.get_parameter_status('TimeZone')
if conn_timezone_name != self.timezone_name:
cursor = self.connection.cursor()
try:
cursor.execute(self.ops.set_time_zone_sql(), [self.timezone_name])
finally:
cursor.close()
# Commit after setting the time zone (see #17062)
if not self.get_autocommit():
self.connection.commit()
def create_cursor(self):
cursor = self.connection.cursor()
cursor.tzinfo_factory = utc_tzinfo_factory if settings.USE_TZ else None
return cursor
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit = autocommit
def check_constraints(self, table_names=None):
"""
To check constraints, we set them to immediate. Then, when we're done,
we must ensure they are returned to deferred.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
def is_usable(self):
try:
# Use a psycopg cursor directly, bypassing Django's utilities.
self.connection.cursor().execute("SELECT 1")
except Database.Error:
return False
else:
return True
@property
def _nodb_connection(self):
nodb_connection = super(DatabaseWrapper, self)._nodb_connection
try:
nodb_connection.ensure_connection()
except (DatabaseError, WrappedDatabaseError):
warnings.warn(
"Normally Django will use a connection to the 'postgres' database "
"to avoid running initialization queries against the production "
"database when it's not needed (for example, when running tests). "
"Django was unable to create a connection to the 'postgres' database "
"and will use the default database instead.",
RuntimeWarning
)
settings_dict = self.settings_dict.copy()
settings_dict['NAME'] = settings.DATABASES[DEFAULT_DB_ALIAS]['NAME']
nodb_connection = self.__class__(
self.settings_dict.copy(),
alias=self.alias,
allow_thread_sharing=False)
return nodb_connection
@cached_property
def psycopg2_version(self):
return PSYCOPG2_VERSION
@cached_property
def pg_version(self):
with self.temporary_connection():
return get_version(self.connection)
|
Lothiraldan/ZeroServices
|
refs/heads/master
|
zeroservices/services/__init__.py
|
1
|
from .http_interface import get_http_interface, BasicAuth
from .http_client import BaseHTTPClient, BasicAuthHTTPClient
|
zanph/zanph
|
refs/heads/master
|
flaskroulette/venv/lib/python2.7/site-packages/wheel/test/test_install.py
|
455
|
# Test wheel.
# The file has the following contents:
# hello.pyd
# hello/hello.py
# hello/__init__.py
# test-1.0.data/data/hello.dat
# test-1.0.data/headers/hello.dat
# test-1.0.data/scripts/hello.sh
# test-1.0.dist-info/WHEEL
# test-1.0.dist-info/METADATA
# test-1.0.dist-info/RECORD
# The root is PLATLIB
# So, some in PLATLIB, and one in each of DATA, HEADERS and SCRIPTS.
import wheel.tool
import wheel.pep425tags
from wheel.install import WheelFile
from tempfile import mkdtemp
import shutil
import os
THISDIR = os.path.dirname(__file__)
TESTWHEEL = os.path.join(THISDIR, 'test-1.0-py2.py3-none-win32.whl')
def check(*path):
return os.path.exists(os.path.join(*path))
def test_install():
tempdir = mkdtemp()
def get_supported():
return list(wheel.pep425tags.get_supported()) + [('py3', 'none', 'win32')]
whl = WheelFile(TESTWHEEL, context=get_supported)
assert whl.supports_current_python(get_supported)
try:
locs = {}
for key in ('purelib', 'platlib', 'scripts', 'headers', 'data'):
locs[key] = os.path.join(tempdir, key)
os.mkdir(locs[key])
whl.install(overrides=locs)
assert len(os.listdir(locs['purelib'])) == 0
assert check(locs['platlib'], 'hello.pyd')
assert check(locs['platlib'], 'hello', 'hello.py')
assert check(locs['platlib'], 'hello', '__init__.py')
assert check(locs['data'], 'hello.dat')
assert check(locs['headers'], 'hello.dat')
assert check(locs['scripts'], 'hello.sh')
assert check(locs['platlib'], 'test-1.0.dist-info', 'RECORD')
finally:
shutil.rmtree(tempdir)
def test_install_tool():
"""Slightly improve coverage of wheel.install"""
wheel.tool.install([TESTWHEEL], force=True, dry_run=True)
|
studio666/cjdns
|
refs/heads/master
|
node_build/dependencies/libuv/build/gyp/test/same-gyp-name/gyptest-default.py
|
318
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Build a .gyp that depends on 2 gyp files with the same name.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('all.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('all.gyp', chdir='relocate/src')
expect1 = """\
Hello from main1.cc
"""
expect2 = """\
Hello from main2.cc
"""
if test.format == 'xcode':
chdir1 = 'relocate/src/subdir1'
chdir2 = 'relocate/src/subdir2'
else:
chdir1 = chdir2 = 'relocate/src'
test.run_built_executable('program1', chdir=chdir1, stdout=expect1)
test.run_built_executable('program2', chdir=chdir2, stdout=expect2)
test.pass_test()
|
bwohlberg/sporco
|
refs/heads/master
|
sporco/dictlrn/bpdndl.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright (C) 2015-2020 by Brendt Wohlberg <brendt@ieee.org>
# All rights reserved. BSD 3-clause License.
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""Dictionary learning based on BPDN sparse coding"""
from __future__ import print_function, absolute_import
import copy
import numpy as np
from sporco.util import u
from sporco.admm import bpdn
from sporco.admm import cmod
from sporco.dictlrn import dictlrn
__author__ = """Brendt Wohlberg <brendt@ieee.org>"""
class BPDNDictLearn(dictlrn.DictLearn):
r"""
Dictionary learning based on BPDN and CnstrMOD
|
.. inheritance-diagram:: BPDNDictLearn
:parts: 2
|
Solve the optimisation problem
.. math::
\mathrm{argmin}_{D, X} \; (1/2) \| D X - S \|_F^2 + \lambda \|
X \|_1 \quad \text{such that} \quad \|\mathbf{d}_m\|_2 = 1
via interleaved alternation between the ADMM steps of the
:class:`.admm.bpdn.BPDN` and :class:`.admm.cmod.CnstrMOD` problems.
After termination of the :meth:`solve` method, attribute :attr:`itstat`
is a list of tuples representing statistics of each iteration. The
fields of the named tuple ``IterationStats`` are:
``Iter`` : Iteration number
``ObjFun`` : Objective function value
``DFid`` : Value of data fidelity term :math:`(1/2) \| D X - S \|_F^2`
``RegL1`` : Value of regularisation term :math:`\| X \|_1`
``Cnstr`` : Constraint violation measure
``XPrRsdl`` : Norm of X primal residual
``XDlRsdl`` : Norm of X dual residual
``XRho`` : X penalty parameter
``DPrRsdl`` : Norm of D primal residual
``DDlRsdl`` : Norm of D dual residual
``DRho`` : D penalty parameter
``Time`` : Cumulative run time
"""
class Options(dictlrn.DictLearn.Options):
"""BPDN dictionary learning algorithm options.
Options include all of those defined in
:class:`sporco.dictlrn.dictlrn.DictLearn.Options`, together with
additional options:
``AccurateDFid`` : Flag determining whether data fidelity term is
estimated from the value computed in the X update (``False``) or
is computed after every outer iteration over an X update and a D
update (``True``), which is slower but more accurate.
``BPDN`` : Options :class:`sporco.admm.bpdn.BPDN.Options`
``CMOD`` : Options :class:`sporco.admm.cmod.CnstrMOD.Options`
"""
defaults = copy.deepcopy(dictlrn.DictLearn.Options.defaults)
defaults.update(
{'AccurateDFid': False,
'BPDN': copy.deepcopy(bpdn.BPDN.Options.defaults),
'CMOD': copy.deepcopy(cmod.CnstrMOD.Options.defaults)})
def __init__(self, opt=None):
"""
Parameters
----------
opt : dict or None, optional (default None)
BPDNDictLearn algorithm options
"""
dictlrn.DictLearn.Options.__init__(
self, {'BPDN': bpdn.BPDN.Options(
{'MaxMainIter': 1, 'AutoRho':
{'Period': 10, 'AutoScaling': False, 'RsdlRatio': 10.0,
'Scaling': 2.0, 'RsdlTarget': 1.0}}),
'CMOD': cmod.CnstrMOD.Options(
{'MaxMainIter': 1, 'AutoRho': {'Period': 10},
'AuxVarObj': False})
})
if opt is None:
opt = {}
self.update(opt)
def __init__(self, D0, S, lmbda=None, opt=None):
"""
|
**Call graph**
.. image:: ../_static/jonga/bpdndl_init.svg
:width: 20%
:target: ../_static/jonga/bpdndl_init.svg
|
Parameters
----------
D0 : array_like, shape (N, M)
Initial dictionary matrix
S : array_like, shape (N, K)
Signal vector or matrix
lmbda : float
Regularisation parameter
opt : :class:`BPDNDictLearn.Options` object
Algorithm options
"""
if opt is None:
opt = BPDNDictLearn.Options()
self.opt = opt
# Normalise dictionary according to D update options
D0 = cmod.getPcn(opt['CMOD', 'ZeroMean'])(D0)
# Modify D update options to include initial values for Y and U
Nc = D0.shape[1]
opt['CMOD'].update({'Y0': D0, 'U0': np.zeros((S.shape[0], Nc))})
# Create X update object
xstep = bpdn.BPDN(D0, S, lmbda, opt['BPDN'])
# Create D update object
Nm = S.shape[1]
dstep = cmod.CnstrMOD(xstep.Y, S, (Nc, Nm), opt['CMOD'])
# Configure iteration statistics reporting
if self.opt['AccurateDFid']:
isxmap = {'XPrRsdl': 'PrimalRsdl', 'XDlRsdl': 'DualRsdl',
'XRho': 'Rho'}
evlmap = {'ObjFun': 'ObjFun', 'DFid': 'DFid', 'RegL1': 'RegL1'}
else:
isxmap = {'ObjFun': 'ObjFun', 'DFid': 'DFid', 'RegL1': 'RegL1',
'XPrRsdl': 'PrimalRsdl', 'XDlRsdl': 'DualRsdl',
'XRho': 'Rho'}
evlmap = {}
isc = dictlrn.IterStatsConfig(
isfld=['Iter', 'ObjFun', 'DFid', 'RegL1', 'Cnstr', 'XPrRsdl',
'XDlRsdl', 'XRho', 'DPrRsdl', 'DDlRsdl', 'DRho', 'Time'],
isxmap=isxmap,
isdmap={'Cnstr': 'Cnstr', 'DPrRsdl': 'PrimalRsdl',
'DDlRsdl': 'DualRsdl', 'DRho': 'Rho'},
evlmap=evlmap,
hdrtxt=['Itn', 'Fnc', 'DFid', u('ℓ1'), 'Cnstr', 'r_X', 's_X',
u('ρ_X'), 'r_D', 's_D', u('ρ_D')],
hdrmap={'Itn': 'Iter', 'Fnc': 'ObjFun', 'DFid': 'DFid',
u('ℓ1'): 'RegL1', 'Cnstr': 'Cnstr', 'r_X': 'XPrRsdl',
's_X': 'XDlRsdl', u('ρ_X'): 'XRho', 'r_D': 'DPrRsdl',
's_D': 'DDlRsdl', u('ρ_D'): 'DRho'}
)
# Call parent constructor
super(BPDNDictLearn, self).__init__(xstep, dstep, opt, isc)
def evaluate(self):
"""Evaluate functional value of previous iteration"""
if self.opt['AccurateDFid']:
D = self.dstep.var_y()
X = self.xstep.var_y()
S = self.xstep.S
dfd = 0.5*np.linalg.norm((D.dot(X) - S))**2
rl1 = np.sum(np.abs(X))
return dict(DFid=dfd, RegL1=rl1, ObjFun=dfd+self.xstep.lmbda*rl1)
else:
return None
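# A minimal usage sketch (an assumption, not part of the original module):
# learn a 32-atom dictionary from random training data. Shapes follow the
# class docstring: D0 is (N, M), S is (N, K).
if __name__ == '__main__':
    np.random.seed(0)
    S = np.random.randn(64, 256)   # K = 256 training signals of dimension N = 64
    D0 = np.random.randn(64, 32)   # initial dictionary with M = 32 atoms
    opt = BPDNDictLearn.Options({'Verbose': False, 'MaxMainIter': 10})
    d = BPDNDictLearn(D0, S, lmbda=0.1, opt=opt)
    D = d.solve()                  # learned dictionary, shape (64, 32)
    print('Final functional value: %.3e' % d.getitstat().ObjFun[-1])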
|
ADL175/http-server
|
refs/heads/master
|
src/server.py
|
1
|
"""This is the server code for an HTTP server."""
import socket
import sys
from email.utils import formatdate
def response_ok():
"""Return a valid HTTP response."""
message = b'HTTP/1.1 200 OK\r\n'
message += u'Date: {}'.format(formatdate(usegmt=True)).encode('utf8')
message += b'\r\nContent-Type: text/plain\r\n\r\n'
return message
def response_error():
"""Return an internal error response."""
return b'HTTP/1.1 500 Internal Server Error\r\n\r\n'
def server():
"""Listens for message and returns an HTTP response."""
server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM, socket.IPPROTO_TCP)
address = ('127.0.0.1', 5028)
server.bind(address)
server.listen(1)
while True:
try:
connection, address = server.accept()
                buffer_length = 8
                message = b''
                while True:
                    part = connection.recv(buffer_length)
                    message += part
                    # Stop at the end of the headers, or when the client
                    # disconnects before sending a blank line.
                    if not part or message.endswith(b'\r\n\r\n'):
                        break
print(message.split(b'\r\n\r\n')[0])
connection.sendall(response_ok())
connection.close()
        except KeyboardInterrupt:
            # shutdown() on a listening socket raises an OSError on most
            # platforms, since it was never connected; close() is enough.
            server.close()
            print('Shutting down HTTP server...')
            sys.exit()
if __name__ == '__main__': # pragma: no cover
"""Server code that will in console."""
print('Your HTTP server is up and running')
server()
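# A minimal client sketch (an assumption, not part of the original module):
# send a bare GET request to the server above and print the response. Start
# the server first, then run this from a separate shell.
#
#     import socket
#     client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     client.connect(('127.0.0.1', 5028))
#     client.sendall(b'GET / HTTP/1.1\r\nHost: 127.0.0.1\r\n\r\n')
#     print(client.recv(4096).decode('utf8'))
#     client.close()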
|